diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -165,8 +165,9 @@ [LLVMMatchType<0>, LLVMPointerType>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + llvm_anyint_ty, LLVMMatchType<1>], + [NoCapture>, ImmArg>, IntrReadMem]>, + RISCVVIntrinsic; // For unit stride fault-only-first load with mask // Input: (maskedoff, pointer, mask, vl) // Output: (data, vl) @@ -177,8 +178,8 @@ [LLVMMatchType<0>, LLVMPointerType>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - LLVMMatchType<1>], - [NoCapture>]>, RISCVVIntrinsic; + LLVMMatchType<1>, LLVMMatchType<1>], + [NoCapture>, ImmArg>]>, RISCVVIntrinsic; // For strided load // Input: (pointer, stride, vl) class RISCVSLoad @@ -192,8 +193,10 @@ : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, LLVMPointerType>, llvm_anyint_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, + LLVMMatchType<1>], + [NoCapture>, ImmArg>, IntrReadMem]>, + RISCVVIntrinsic; // For indexed load // Input: (pointer, index, vl) class RISCVILoad @@ -207,8 +210,10 @@ : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, LLVMPointerType>, llvm_anyvector_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>], + [NoCapture>, ImmArg>, IntrReadMem]>, + RISCVVIntrinsic; // For unit stride store // Input: (vector_in, pointer, vl) class RISCVUSStore @@ -267,6 +272,12 @@ // For destination vector type is the same as first source vector (with mask). // Input: (vector_in, mask, vl) class RISCVUnaryAAMask + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + class RISCVUnaryAAMaskNoTA : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], @@ -288,8 +299,9 @@ class RISCVRGatherVVMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; // Input: (vector_in, int16_vector_in, vl) class RISCVRGatherEI16VVNoMask : Intrinsic<[llvm_anyvector_ty], @@ -302,8 +314,9 @@ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; // For destination vector type is the same as first source vector, and the // second operand is XLen. 
// Input: (vector_in, xlen_in, vl) @@ -318,8 +331,9 @@ class RISCVGatherVXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, + LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { } // For destination vector type is the same as first source vector. // Input: (vector_in, vector_in/scalar_in, vl) @@ -334,8 +348,9 @@ class RISCVBinaryAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 3; } // For destination vector type is the same as first source vector. The @@ -351,8 +366,9 @@ class RISCVBinaryAAShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; // For destination vector type is NOT the same as first source vector. // Input: (vector_in, vector_in/scalar_in, vl) class RISCVBinaryABXNoMask @@ -366,8 +382,9 @@ class RISCVBinaryABXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<3>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 3; } // For destination vector type is NOT the same as first source vector. The @@ -383,8 +400,9 @@ class RISCVBinaryABShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<3>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; // For binary operations with V0 as input. // Input: (vector_in, vector_in/scalar_in, V0, vl) class RISCVBinaryWithV0 @@ -465,8 +483,9 @@ class RISCVSaturatingBinaryAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>], + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { let SplatOperand = 3; } // For Saturating binary operations. @@ -484,8 +503,9 @@ class RISCVSaturatingBinaryAAShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>], + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. 
@@ -501,8 +521,9 @@ class RISCVSaturatingBinaryABShiftMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<3>], + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; class RISCVTernaryAAAXNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, @@ -584,8 +605,8 @@ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, - llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + llvm_anyint_ty, LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; // For unary operations with the same vector type in/out without mask // Output: (vector) // Input: (vector_in, vl) @@ -618,8 +639,9 @@ class RISCVConversionMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, - LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; // For atomic operations without mask // Input: (base, index, value, vl) class RISCVAMONoMask @@ -650,8 +672,9 @@ !listconcat(!listsplat(LLVMMatchType<0>, nf), [LLVMPointerToElt<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - llvm_anyint_ty]), - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + llvm_anyint_ty, LLVMMatchType<1>]), + [ImmArg>, NoCapture>, IntrReadMem]>, + RISCVVIntrinsic; // For unit stride fault-only-first segment load // Input: (pointer, vl) @@ -674,8 +697,9 @@ !listconcat(!listsplat(LLVMMatchType<0>, nf), [LLVMPointerToElt<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - LLVMMatchType<1>]), - [NoCapture>]>, RISCVVIntrinsic; + LLVMMatchType<1>, LLVMMatchType<1>]), + [ImmArg>, NoCapture>]>, + RISCVVIntrinsic; // For stride segment load // Input: (pointer, offset, vl) @@ -693,8 +717,9 @@ [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - LLVMMatchType<1>]), - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + LLVMMatchType<1>, LLVMMatchType<1>]), + [ImmArg>, NoCapture>, IntrReadMem]>, + RISCVVIntrinsic; // For indexed segment load // Input: (pointer, index, vl) @@ -712,8 +737,9 @@ [LLVMPointerToElt<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, - llvm_anyint_ty]), - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + llvm_anyint_ty, LLVMMatchType<2>]), + [ImmArg>, NoCapture>, IntrReadMem]>, + RISCVVIntrinsic; // For unit stride segment store // Input: (value, pointer, vl) @@ -1049,7 +1075,7 @@ defm vssubu : RISCVSaturatingBinaryAAX; defm vssub : RISCVSaturatingBinaryAAX; - def int_riscv_vmerge : RISCVBinaryWithV0; + defm vmerge : RISCVBinaryWithV0; def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty], @@ -1124,7 +1150,7 @@ defm vrgather_vx : RISCVRGatherVX; defm vrgatherei16_vv : RISCVRGatherEI16VV; - def "int_riscv_vcompress" : RISCVUnaryAAMask; + def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA; defm vaaddu : RISCVSaturatingBinaryAAX; defm vaadd : RISCVSaturatingBinaryAAX; diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -103,6 +103,11 @@ LMUL_F2 }; +enum { + TAIL_UNDISTURBED = 0, + TAIL_AGNOSTIC 
= 1, +}; + // Helper functions to read TSFlags. /// \returns the format of the instruction. static inline unsigned getFormat(uint64_t TSFlags) { diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -79,7 +79,7 @@ const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl &Operands, - MVT *IndexVT = nullptr); + bool IsLoad = false, MVT *IndexVT = nullptr); void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided); void selectVLSEGFF(SDNode *Node, bool IsMasked); diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -213,7 +213,7 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands( SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl &Operands, - MVT *IndexVT) { + bool IsLoad, MVT *IndexVT) { SDValue Chain = Node->getOperand(0); SDValue Glue; @@ -242,6 +242,14 @@ SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); Operands.push_back(SEWOp); + // Masked load has the tail policy argument. + if (IsMasked && IsLoad) { + // Policy must be a constant. + uint64_t Policy = Node->getConstantOperandVal(CurOp++); + SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT); + Operands.push_back(PolicyOp); + } + Operands.push_back(Chain); // Chain. if (Glue) Operands.push_back(Glue); @@ -266,7 +274,7 @@ } addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, - Operands); + Operands, /*IsLoad=*/true); const RISCV::VLSEGPseudo *P = RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW, @@ -307,7 +315,8 @@ } addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, - /*IsStridedOrIndexed*/ false, Operands); + /*IsStridedOrIndexed*/ false, Operands, + /*IsLoad=*/true); const RISCV::VLSEGPseudo *P = RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true, @@ -352,7 +361,8 @@ MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, - /*IsStridedOrIndexed*/ true, Operands, &IndexVT); + /*IsStridedOrIndexed*/ true, Operands, + /*IsLoad=*/true, &IndexVT); assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && "Element count mismatch"); @@ -429,7 +439,8 @@ MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, - /*IsStridedOrIndexed*/ true, Operands, &IndexVT); + /*IsStridedOrIndexed*/ true, Operands, + /*IsLoad=*/false, &IndexVT); assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && "Element count mismatch"); @@ -1025,7 +1036,7 @@ MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ true, Operands, - &IndexVT); + /*IsLoad=*/true, &IndexVT); assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && "Element count mismatch"); @@ -1064,7 +1075,7 @@ Operands.push_back(Node->getOperand(CurOp++)); addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, - Operands); + Operands, /*IsLoad=*/true); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = @@ -1092,7 +1103,8 @@ Operands.push_back(Node->getOperand(CurOp++)); addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, - /*IsStridedOrIndexed*/ false, Operands); + /*IsStridedOrIndexed*/ false, Operands, + 
/*IsLoad=*/true); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = @@ -1214,7 +1226,7 @@ MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ true, Operands, - &IndexVT); + /*IsLoad=*/false, &IndexVT); assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && "Element count mismatch"); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -3737,7 +3737,7 @@ case Intrinsic::riscv_vslide1down_mask: { // We need to special case these when the scalar is larger than XLen. unsigned NumOps = Op.getNumOperands(); - bool IsMasked = NumOps == 6; + bool IsMasked = NumOps == 7; unsigned OpOffset = IsMasked ? 1 : 0; SDValue Scalar = Op.getOperand(2 + OpOffset); if (Scalar.getValueType().bitsLE(XLenVT)) @@ -3762,7 +3762,7 @@ DAG.getConstant(1, DL, XLenVT)); // Double the VL since we halved SEW. - SDValue VL = Op.getOperand(NumOps - 1); + SDValue VL = Op.getOperand(NumOps - (1 + OpOffset)); SDValue I32VL = DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT)); @@ -3791,7 +3791,7 @@ return Vec; // Apply mask after the operation. - SDValue Mask = Op.getOperand(NumOps - 2); + SDValue Mask = Op.getOperand(NumOps - 3); SDValue MaskedOff = Op.getOperand(1); return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL); } @@ -4454,8 +4454,9 @@ SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT); - SDValue Ops[] = {Load->getChain(), IntID, PassThru, - Load->getBasePtr(), Mask, VL}; + SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT); + SDValue Ops[] = { + Load->getChain(), IntID, PassThru, Load->getBasePtr(), Mask, VL, Policy}; SDValue Result = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, Load->getMemoryVT(), Load->getMemOperand()); @@ -4756,6 +4757,8 @@ if (!IsUnmasked) Ops.push_back(Mask); Ops.push_back(VL); + if (!IsUnmasked) + Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT)); SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); SDValue Result = diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -908,7 +908,11 @@ if (RISCVII::hasSEWOp(TSFlags)) { VSETVLIInfo NewInfo = computeInfoForInstr(MI, TSFlags, MRI); if (RISCVII::hasVLOp(TSFlags)) { - MachineOperand &VLOp = MI.getOperand(MI.getNumExplicitOperands() - 2); + unsigned Offset = 2; + if (RISCVII::hasVecPolicyOp(TSFlags)) + Offset = 3; + MachineOperand &VLOp = + MI.getOperand(MI.getNumExplicitOperands() - Offset); if (VLOp.isReg()) { // Erase the AVL operand from the instruction. 
VLOp.setReg(RISCV::NoRegister); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -644,7 +644,7 @@ Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, RISCVVLE.val, VLMul> { let mayLoad = 1; @@ -654,6 +654,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -675,7 +676,7 @@ Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, GPR:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, RISCVVLE.val, VLMul> { let mayLoad = 1; @@ -685,6 +686,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -709,7 +711,7 @@ Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, IdxClass:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, RISCVVLX.val, VLMul, LMUL> { let mayLoad = 1; @@ -719,6 +721,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -862,6 +865,22 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoUnaryMaskTA : + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 1; + let HasVecPolicyOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // mask unary operation without maskedoff class VPseudoMaskUnarySOutMask: Pseudo<(outs GPR:$rd), @@ -977,6 +996,26 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoBinaryMaskTA : + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 1; + let HasVecPolicyOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // Like VPseudoBinaryMask, but output can be V0. class VPseudoBinaryMOutMask.R:$rd), (ins GetVRegNoV0.R:$merge, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1015,6 +1054,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 0; // Merge is also rs2. 
+ let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1161,7 +1201,7 @@ class VPseudoUSSegLoadMask NF, bit isFF>: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, RISCVVLSEG.val, VLMul> { let mayLoad = 1; @@ -1171,6 +1211,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1192,7 +1233,8 @@ class VPseudoSSegLoadMask NF>: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, - GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>, + GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, + ixlenimm:$policy),[]>, RISCVVPseudo, RISCVVLSEG.val, VLMul> { let mayLoad = 1; @@ -1202,6 +1244,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1227,7 +1270,8 @@ bits<4> NF, bit Ordered>: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, - IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>, + IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, + ixlenimm:$policy),[]>, RISCVVPseudo, RISCVVLXSEG.val, VLMul, LMUL> { let mayLoad = 1; @@ -1239,6 +1283,7 @@ let HasVLOp = 1; let HasSEWOp = 1; let HasMergeOp = 1; + let HasVecPolicyOp = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1514,8 +1559,8 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoBinaryNoMask; - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA; } } @@ -1542,8 +1587,8 @@ let VLMul = lmul.value in { def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask; - def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMask; + def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA; } } @@ -1735,6 +1780,15 @@ } } +multiclass VPseudoUnaryTAV_V { + foreach m = MxList.m in { + let VLMul = m.value in { + def "_V_" # m.MX : VPseudoUnaryNoMask; + def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA; + } + } +} + multiclass VPseudoUnaryV_V { foreach m = MxList.m in { let VLMul = m.value in { @@ -1750,8 +1804,8 @@ { let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask; - def "_" # m.MX # "_MASK" : VPseudoUnaryMask; + def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA; } } } @@ -1762,8 +1816,8 @@ { let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask; - def "_" # m.MX # "_MASK" : VPseudoUnaryMask; + def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA; } } } @@ -1774,8 +1828,8 @@ { let VLMul = m.value in { def "_" # m.MX : VPseudoUnaryNoMask; - def "_" # m.MX # "_MASK" : VPseudoUnaryMask; + def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA; } } } @@ -2033,8 +2087,8 @@ string Constraint = ""> { let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoUnaryNoMask; - def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask; + def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA; } } @@ -2217,6 +2271,26 @@ (op2_type op2_reg_class:$rs2), (mask_type V0), GPR:$vl, sew)>; +class VPatUnaryMaskTA : + Pat<(result_type (!cast(intrinsic_name#"_mask") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; + class VPatMaskUnaryNoMask : @@ -2318,6 +2392,28 @@ (op2_type op2_kind:$rs2), (mask_type V0), 
GPR:$vl, sew)>; +class VPatBinaryMaskTA : + Pat<(result_type (!cast(intrinsic_name#"_mask") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; + // Same as above but source operands are swapped. class VPatBinaryMaskSwapped(inst#"_MASK_TIED") (result_type result_reg_class:$merge), (op2_type op2_kind:$rs2), - (mask_type V0), GPR:$vl, sew)>; + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; class VPatTernaryNoMask; - def : VPatUnaryMask; + def : VPatUnaryMaskTA; } } @@ -2549,9 +2645,9 @@ def : VPatUnaryNoMask; - def : VPatUnaryMask; + def : VPatUnaryMaskTA; } } @@ -2597,6 +2693,24 @@ op2_kind>; } +multiclass VPatBinaryTA +{ + def : VPatBinaryNoMask; + def : VPatBinaryMaskTA; +} + multiclass VPatBinarySwapped; } +multiclass VPatConversionTA +{ + def : VPatUnaryNoMask; + def : VPatUnaryMaskTA; +} + multiclass VPatBinaryV_VV vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryTA; } multiclass VPatBinaryV_VV_INT vtilist> { foreach vti = vtilist in { defvar ivti = GetIntVTypeInfo.Vti; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -2707,10 +2838,10 @@ defvar emul_str = octuple_to_str.ret; defvar ivti = !cast("VI" # eew # emul_str); defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str; - defm : VPatBinary; + defm : VPatBinaryTA; } } } @@ -2719,29 +2850,29 @@ list vtilist> { foreach vti = vtilist in { defvar kind = "V"#vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryTA; } } multiclass VPatBinaryV_VX_INT vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryTA; } multiclass VPatBinaryV_VI vtilist, Operand imm_type> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryTA; } multiclass VPatBinaryM_MM { @@ -2756,10 +2887,10 @@ foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -2769,10 +2900,10 @@ defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defvar kind = "V"#Vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -2788,10 +2919,10 @@ def : VPatTiedBinaryMask; - def : VPatBinaryMask; + def : VPatBinaryMaskTA; } } @@ -2801,10 +2932,10 @@ defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defvar kind = "W"#Vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -2813,10 +2944,10 @@ foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -2826,10 +2957,10 @@ defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defvar kind = "W"#Vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -2838,10 +2969,10 @@ foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; - defm : VPatBinary; + defm : VPatBinaryTA; } } @@ -3174,8 +3305,8 @@ } } -multiclass VPatConversionVI_VF +multiclass VPatClassifyVI_VF { foreach fvti = AllFloatVectors in { @@ -3187,6 +3318,19 @@ } } +multiclass VPatConversionVI_VF +{ + foreach fvti = AllFloatVectors in + { + defvar ivti = GetIntVTypeInfo.Vti; + + defm : VPatConversionTA; + } +} + multiclass VPatConversionVF_VI { @@ -3194,9 +3338,9 @@ { defvar ivti = GetIntVTypeInfo.Vti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3206,9 +3350,9 @@ defvar fvti = 
fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3218,9 +3362,9 @@ defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3230,9 +3374,9 @@ defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3242,9 +3386,9 @@ defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3254,9 +3398,9 @@ defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3266,9 +3410,9 @@ defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; - defm : VPatConversion; + defm : VPatConversionTA; } } @@ -3475,14 +3619,16 @@ (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), (vti.Mask V0), - VLOpFrag)), + VLOpFrag, + (XLenVT timm:$policy))), (!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, - vti.Log2SEW)>; + vti.Log2SEW, + (XLenVT timm:$policy))>; // Match VSUB with a small immediate to vadd.vi by negating the immediate. def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), @@ -3496,14 +3642,16 @@ (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), (vti.Mask V0), - VLOpFrag)), + VLOpFrag, + (XLenVT timm:$policy))), (!cast("PseudoVADD_VI_"#vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs1, (NegImm simm5_plus1:$rs2), (vti.Mask V0), GPR:$vl, - vti.Log2SEW)>; + vti.Log2SEW, + (XLenVT timm:$policy))>; } //===----------------------------------------------------------------------===// @@ -3724,17 +3872,17 @@ //===----------------------------------------------------------------------===// // 14.8. Vector Floating-Point Square-Root Instruction //===----------------------------------------------------------------------===// -defm PseudoVFSQRT : VPseudoUnaryV_V; +defm PseudoVFSQRT : VPseudoUnaryTAV_V; //===----------------------------------------------------------------------===// // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction //===----------------------------------------------------------------------===// -defm PseudoVFRSQRT7 : VPseudoUnaryV_V; +defm PseudoVFRSQRT7 : VPseudoUnaryTAV_V; //===----------------------------------------------------------------------===// // 14.10. Vector Floating-Point Reciprocal Estimate Instruction //===----------------------------------------------------------------------===// -defm PseudoVFREC7 : VPseudoUnaryV_V; +defm PseudoVFREC7 : VPseudoUnaryTAV_V; //===----------------------------------------------------------------------===// // 14.11. Vector Floating-Point Min/Max Instructions @@ -4420,7 +4568,7 @@ //===----------------------------------------------------------------------===// // 14.14. Vector Floating-Point Classify Instruction //===----------------------------------------------------------------------===// -defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">; +defm : VPatClassifyVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">; //===----------------------------------------------------------------------===// // 14.15. 
Vector Floating-Point Merge Instruction diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -293,7 +293,7 @@ (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, - VMV0:$vm, GPR:$vl, sew)>; + VMV0:$vm, GPR:$vl, sew, TAIL_AGNOSTIC)>; } multiclass VPatBinaryVL_XI; + VMV0:$vm, GPR:$vl, sew, TAIL_AGNOSTIC)>; } multiclass VPatBinaryVL_VV_VX { @@ -645,7 +645,7 @@ VLOpFrag), (!cast("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2, - VMV0:$vm, GPR:$vl, vti.Log2SEW)>; + VMV0:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag), @@ -656,7 +656,7 @@ VLOpFrag), (!cast("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2, - VMV0:$vm, GPR:$vl, vti.Log2SEW)>; + VMV0:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } // 12.3. Vector Integer Extension @@ -1312,7 +1312,7 @@ VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, - vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>; + vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm), (riscv_vrgather_vx_vl @@ -1324,7 +1324,7 @@ VLOpFrag)), (!cast("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, - vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>; + vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; // emul = lmul * 16 / sew defvar vlmul = vti.LMul; @@ -1351,7 +1351,7 @@ VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, - vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>; + vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } @@ -1395,7 +1395,7 @@ VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, - vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>; + vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm), (riscv_vrgather_vx_vl @@ -1407,7 +1407,7 @@ VLOpFrag)), (!cast("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm, - vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>; + vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; defvar vlmul = vti.LMul; defvar octuple_lmul = vlmul.octuple; @@ -1433,7 +1433,7 @@ VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, - vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>; + vti.Mask:$vm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } diff --git a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll --- a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll +++ b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll @@ -32,7 +32,7 @@ ; CHECK-NEXT: addiw a0, a0, -1366 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v28, v12, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -62,14 +62,12 @@ ; LMULMAX2-NEXT: vmv.s.x v0, a0 ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-NEXT: vrgather.vi v25, v8, 0 -; LMULMAX2-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX2-NEXT: vrgather.vi v25, v9, 3, v0.t ; LMULMAX2-NEXT: addi a0, zero, 8 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX2-NEXT: vmv.s.x v0, a0 ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-NEXT: vrgather.vi v26, v10, 0 -; LMULMAX2-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX2-NEXT: vrgather.vi v26, v11, 3, v0.t ; LMULMAX2-NEXT: addi a0, zero, 3 ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf8, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -142,7 +142,7 @@ ; RV32-NEXT: addi a0, zero, 8 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vrgather.vi v26, v10, 1, v0.t ; RV32-NEXT: vmv2r.v v8, v26 ; RV32-NEXT: ret @@ -157,7 +157,7 @@ ; RV64-NEXT: addi a0, zero, 8 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vrgather.vi v26, v10, 1, v0.t ; RV64-NEXT: vmv2r.v v8, v26 ; RV64-NEXT: ret @@ -177,7 +177,7 @@ ; RV32-NEXT: vlse64.v v26, (a0), zero ; RV32-NEXT: vid.v v25 ; RV32-NEXT: vrsub.vi v25, v25, 4 -; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v26, v8, v25, v0.t ; RV32-NEXT: vmv2r.v v8, v26 ; RV32-NEXT: ret @@ -193,7 +193,6 @@ ; RV64-NEXT: vlse64.v v26, (a0), zero ; RV64-NEXT: vid.v v28 ; RV64-NEXT: vrsub.vi v28, v28, 4 -; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; RV64-NEXT: vrgather.vv v26, v8, v28, v0.t ; RV64-NEXT: vmv2r.v v8, v26 ; RV64-NEXT: ret @@ -214,7 +213,6 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI8_0) ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; RV32-NEXT: vrgatherei16.vv v26, v8, v25, v0.t ; RV32-NEXT: vmv2r.v v8, v26 ; RV32-NEXT: ret @@ -231,7 +229,6 @@ ; RV64-NEXT: addi a0, a0, %lo(.LCPI8_0) ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vlse64.v v26, (a0), zero -; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; RV64-NEXT: vrgather.vv v26, v8, v28, v0.t ; RV64-NEXT: vmv2r.v v8, v26 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -93,7 +93,7 @@ ; CHECK-NEXT: addi a0, zero, 8 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v25, v9, 1, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -111,7 +111,6 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: vrsub.vi v26, v25, 4 ; CHECK-NEXT: vmv.v.i v25, 5 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vrgather.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -130,7 
+129,6 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v25, 5 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vrgather.vv v25, v8, v26, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -206,7 +204,7 @@ ; RV32-NEXT: addi a0, zero, 164 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v28, v12, v26, v0.t ; RV32-NEXT: vmv4r.v v8, v28 ; RV32-NEXT: ret @@ -227,7 +225,7 @@ ; RV64-NEXT: addi a0, zero, 164 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vrgather.vv v28, v12, v16, v0.t ; RV64-NEXT: vmv4r.v v8, v28 ; RV64-NEXT: ret @@ -252,7 +250,7 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI12_1) ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vle16.v v25, (a0) -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v28, v8, v25, v0.t ; RV32-NEXT: vmv4r.v v8, v28 ; RV32-NEXT: ret @@ -267,7 +265,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vmv.v.i v28, -1 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vrgather.vv v28, v8, v12, v0.t ; RV64-NEXT: vmv4r.v v8, v28 ; RV64-NEXT: ret @@ -282,9 +279,8 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI13_0) ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vle16.v v25, (a0) -; RV32-NEXT: vmv4r.v v28, v8 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; RV32-NEXT: vrgatherei16.vv v8, v28, v25 +; RV32-NEXT: vrgatherei16.vv v28, v8, v25 ; RV32-NEXT: addi a0, zero, 140 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 @@ -293,9 +289,9 @@ ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vle16.v v25, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; RV32-NEXT: vmv.v.i v28, 5 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu -; RV32-NEXT: vrgatherei16.vv v8, v28, v25, v0.t +; RV32-NEXT: vmv.v.i v8, 5 +; RV32-NEXT: vrgatherei16.vv v28, v8, v25, v0.t +; RV32-NEXT: vmv4r.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vx_v8i64: @@ -308,7 +304,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vmv.v.i v28, 5 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vrgather.vv v28, v8, v12, v0.t ; RV64-NEXT: vmv4r.v v8, v28 ; RV64-NEXT: ret @@ -330,7 +325,6 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vsrl.vi v26, v26, 1 ; CHECK-NEXT: vmv.v.x v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vrgather.vv v8, v25, v26, v0.t ; CHECK-NEXT: ret %y = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> @@ -391,7 +385,6 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v25, v8, 2 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vrgather.vi v25, v9, 0, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -412,7 +405,7 @@ ; CHECK-NEXT: addi a0, zero, 66 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v25, v9, 0, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -430,7 +423,7 @@ ; CHECK-NEXT: vrgather.vi v25, v8, 2 ; 
CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v26, 4 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v25, v9, v26, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -450,7 +443,7 @@ ; RV32-NEXT: addi a0, zero, 66 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vi v25, v9, 0, v0.t ; RV32-NEXT: vmv1r.v v8, v25 ; RV32-NEXT: ret @@ -466,7 +459,7 @@ ; RV64-NEXT: addi a0, zero, 66 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vi v25, v9, 0, v0.t ; RV64-NEXT: vmv1r.v v8, v25 ; RV64-NEXT: ret @@ -488,7 +481,6 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v25, v8, 2 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vrgather.vv v25, v9, v26, v0.t ; CHECK-NEXT: vmv1r.v v8, v25 ; CHECK-NEXT: ret @@ -514,7 +506,7 @@ ; RV32-NEXT: addi a0, zero, 98 ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vv v25, v9, v26, v0.t ; RV32-NEXT: vmv1r.v v8, v25 ; RV32-NEXT: ret @@ -536,7 +528,7 @@ ; RV64-NEXT: addi a0, zero, 98 ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vv v25, v9, v26, v0.t ; RV64-NEXT: vmv1r.v v8, v25 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -9,14 +9,14 @@ define <1 x i8> @mgather_v1i8(<1 x i8*> %ptrs, <1 x i1> %m, <1 x i8> %passthru) { ; RV32-LABEL: mgather_v1i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -29,14 +29,14 @@ define <2 x i8> @mgather_v2i8(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -47,7 +47,7 @@ define <2 x i16> @mgather_v2i8_sextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8_sextload_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; 
RV32-NEXT: vsext.vf2 v8, v9 @@ -55,7 +55,7 @@ ; ; RV64-LABEL: mgather_v2i8_sextload_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 @@ -68,7 +68,7 @@ define <2 x i16> @mgather_v2i8_zextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8_zextload_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 @@ -76,7 +76,7 @@ ; ; RV64-LABEL: mgather_v2i8_zextload_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 @@ -89,7 +89,7 @@ define <2 x i32> @mgather_v2i8_sextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8_sextload_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 @@ -97,7 +97,7 @@ ; ; RV64-LABEL: mgather_v2i8_sextload_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vsext.vf4 v8, v9 @@ -110,7 +110,7 @@ define <2 x i32> @mgather_v2i8_zextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8_zextload_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 @@ -118,7 +118,7 @@ ; ; RV64-LABEL: mgather_v2i8_zextload_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vzext.vf4 v8, v9 @@ -131,7 +131,7 @@ define <2 x i64> @mgather_v2i8_sextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8_sextload_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vsext.vf8 v8, v9 @@ -139,7 +139,7 @@ ; ; RV64-LABEL: mgather_v2i8_sextload_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf8 v8, v9 @@ -152,7 +152,7 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32-LABEL: mgather_v2i8_zextload_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vzext.vf8 v8, v9 @@ -160,7 +160,7 @@ ; ; 
RV64-LABEL: mgather_v2i8_zextload_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vzext.vf8 v8, v9 @@ -175,14 +175,14 @@ define <4 x i8> @mgather_v4i8(<4 x i8*> %ptrs, <4 x i1> %m, <4 x i8> %passthru) { ; RV32-LABEL: mgather_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -229,14 +229,14 @@ define <8 x i8> @mgather_v8i8(<8 x i8*> %ptrs, <8 x i1> %m, <8 x i8> %passthru) { ; RV32-LABEL: mgather_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -249,7 +249,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 -; RV32-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -258,7 +258,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 -; RV64-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -272,14 +272,14 @@ define <1 x i16> @mgather_v1i16(<1 x i16*> %ptrs, <1 x i1> %m, <1 x i16> %passthru) { ; RV32-LABEL: mgather_v1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -292,14 +292,14 @@ define <2 x i16> @mgather_v2i16(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-LABEL: mgather_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -310,7 +310,7 @@ define <2 x i32> @mgather_v2i16_sextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-LABEL: mgather_v2i16_sextload_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, 
ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 @@ -318,7 +318,7 @@ ; ; RV64-LABEL: mgather_v2i16_sextload_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 @@ -331,7 +331,7 @@ define <2 x i32> @mgather_v2i16_zextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-LABEL: mgather_v2i16_zextload_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 @@ -339,7 +339,7 @@ ; ; RV64-LABEL: mgather_v2i16_zextload_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 @@ -352,7 +352,7 @@ define <2 x i64> @mgather_v2i16_sextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-LABEL: mgather_v2i16_sextload_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 @@ -360,7 +360,7 @@ ; ; RV64-LABEL: mgather_v2i16_sextload_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf4 v8, v9 @@ -373,7 +373,7 @@ define <2 x i64> @mgather_v2i16_zextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-LABEL: mgather_v2i16_zextload_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 @@ -381,7 +381,7 @@ ; ; RV64-LABEL: mgather_v2i16_zextload_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vzext.vf4 v8, v9 @@ -396,14 +396,14 @@ define <4 x i16> @mgather_v4i16(<4 x i16*> %ptrs, <4 x i1> %m, <4 x i16> %passthru) { ; RV32-LABEL: mgather_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -450,14 +450,14 @@ define <8 x i16> @mgather_v8i16(<8 x i16*> %ptrs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: 
vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -471,7 +471,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -481,7 +481,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -496,7 +496,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -506,7 +506,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -522,7 +522,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -532,7 +532,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -548,7 +548,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -558,7 +558,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -572,14 +572,14 @@ define <1 x i32> @mgather_v1i32(<1 x i32*> %ptrs, <1 x i1> %m, <1 x i32> %passthru) { ; RV32-LABEL: mgather_v1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e32, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -592,14 +592,14 @@ define <2 x i32> @mgather_v2i32(<2 x i32*> %ptrs, <2 x i1> %m, <2 x i32> %passthru) { ; RV32-LABEL: mgather_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, 
v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -610,7 +610,7 @@ define <2 x i64> @mgather_v2i32_sextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, <2 x i32> %passthru) { ; RV32-LABEL: mgather_v2i32_sextload_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 @@ -618,7 +618,7 @@ ; ; RV64-LABEL: mgather_v2i32_sextload_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 @@ -631,7 +631,7 @@ define <2 x i64> @mgather_v2i32_zextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, <2 x i32> %passthru) { ; RV32-LABEL: mgather_v2i32_zextload_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 @@ -639,7 +639,7 @@ ; ; RV64-LABEL: mgather_v2i32_zextload_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 @@ -654,14 +654,14 @@ define <4 x i32> @mgather_v4i32(<4 x i32*> %ptrs, <4 x i1> %m, <4 x i32> %passthru) { ; RV32-LABEL: mgather_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -707,14 +707,14 @@ define <8 x i32> @mgather_v8i32(<8 x i32*> %ptrs, <8 x i1> %m, <8 x i32> %passthru) { ; RV32-LABEL: mgather_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret @@ -728,7 +728,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -738,7 +737,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -753,7 +752,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 
-; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -763,7 +761,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -779,7 +777,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -789,7 +786,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -805,7 +802,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -815,7 +811,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -830,7 +826,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -840,7 +835,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -856,7 +851,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -866,7 +860,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -881,7 +875,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v26, v8, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -891,7 +884,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -905,14 +898,14 @@ define <1 x i64> @mgather_v1i64(<1 x i64*> %ptrs, <1 x i1> %m, <1 x i64> %passthru) { ; RV32-LABEL: mgather_v1i64: 
; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -925,14 +918,14 @@ define <2 x i64> @mgather_v2i64(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64> %passthru) { ; RV32-LABEL: mgather_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -945,14 +938,14 @@ define <4 x i64> @mgather_v4i64(<4 x i64*> %ptrs, <4 x i1> %m, <4 x i64> %passthru) { ; RV32-LABEL: mgather_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -998,14 +991,14 @@ define <8 x i64> @mgather_v8i64(<8 x i64*> %ptrs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32-LABEL: mgather_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1019,7 +1012,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v26, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1029,7 +1022,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1044,7 +1036,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsext.vf8 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1054,7 +1045,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1070,7 +1060,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vzext.vf8 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: 
vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1080,7 +1069,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1096,7 +1084,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v26, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1106,7 +1094,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1121,7 +1108,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1131,7 +1117,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1147,7 +1132,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1157,7 +1141,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1172,7 +1155,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v26, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v26, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1182,7 +1165,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1197,7 +1179,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1207,7 +1188,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1223,7 +1203,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vzext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1233,7 +1212,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: 
vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1248,7 +1226,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v28, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1257,7 +1234,6 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsll.vi v28, v8, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1271,14 +1247,14 @@ define <1 x half> @mgather_v1f16(<1 x half*> %ptrs, <1 x i1> %m, <1 x half> %passthru) { ; RV32-LABEL: mgather_v1f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1291,14 +1267,14 @@ define <2 x half> @mgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, <2 x half> %passthru) { ; RV32-LABEL: mgather_v2f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1311,14 +1287,14 @@ define <4 x half> @mgather_v4f16(<4 x half*> %ptrs, <4 x i1> %m, <4 x half> %passthru) { ; RV32-LABEL: mgather_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -1365,14 +1341,14 @@ define <8 x half> @mgather_v8f16(<8 x half*> %ptrs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -1386,7 +1362,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -1396,7 +1372,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: 
vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1411,7 +1387,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -1421,7 +1397,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1437,7 +1413,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -1447,7 +1423,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1463,7 +1439,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vadd.vv v26, v26, v26 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -1473,7 +1449,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vadd.vv v28, v28, v28 -; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1487,14 +1463,14 @@ define <1 x float> @mgather_v1f32(<1 x float*> %ptrs, <1 x i1> %m, <1 x float> %passthru) { ; RV32-LABEL: mgather_v1f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e32, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1507,14 +1483,14 @@ define <2 x float> @mgather_v2f32(<2 x float*> %ptrs, <2 x i1> %m, <2 x float> %passthru) { ; RV32-LABEL: mgather_v2f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1527,14 +1503,14 @@ define <4 x float> @mgather_v4f32(<4 x float*> %ptrs, <4 x i1> %m, <4 x float> %passthru) { ; RV32-LABEL: mgather_v4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mgather_v4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -1580,14 +1556,14 @@ define <8 x float> @mgather_v8f32(<8 x float*> %ptrs, <8 x i1> %m, <8 x float> %passthru) { ; RV32-LABEL: mgather_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret @@ -1601,7 +1577,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1611,7 +1586,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1626,7 +1601,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1636,7 +1610,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1652,7 +1626,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1662,7 +1635,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1678,7 +1651,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1688,7 +1660,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1703,7 +1675,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1713,7 +1684,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; 
RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1729,7 +1700,6 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1739,7 +1709,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1754,7 +1724,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v26, v8, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v26, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1764,7 +1733,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v28, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1778,14 +1747,14 @@ define <1 x double> @mgather_v1f64(<1 x double*> %ptrs, <1 x i1> %m, <1 x double> %passthru) { ; RV32-LABEL: mgather_v1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1798,14 +1767,14 @@ define <2 x double> @mgather_v2f64(<2 x double*> %ptrs, <2 x i1> %m, <2 x double> %passthru) { ; RV32-LABEL: mgather_v2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1818,14 +1787,14 @@ define <4 x double> @mgather_v4f64(<4 x double*> %ptrs, <4 x i1> %m, <4 x double> %passthru) { ; RV32-LABEL: mgather_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1871,14 +1840,14 @@ define <8 x double> @mgather_v8f64(<8 x double*> %ptrs, <8 x i1> %m, <8 x double> %passthru) { ; RV32-LABEL: mgather_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mgather_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1892,7 +1861,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v26, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1902,7 +1871,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1917,7 +1885,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsext.vf8 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1927,7 +1894,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1943,7 +1909,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vzext.vf8 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1953,7 +1918,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1969,7 +1933,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 ; RV32-NEXT: vsll.vi v26, v26, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v26, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1979,7 +1943,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1994,7 +1957,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2004,7 +1966,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -2020,7 +1981,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2030,7 +1990,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf4 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v 
v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -2045,7 +2004,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v26, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v26, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2055,7 +2014,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -2070,7 +2028,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2080,7 +2037,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -2096,7 +2052,6 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vzext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2106,7 +2061,6 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf2 v28, v8 ; RV64-NEXT: vsll.vi v28, v28, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -2121,7 +2075,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v28, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV32-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2130,7 +2083,6 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsll.vi v28, v8, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; RV64-NEXT: vluxei64.v v12, (a0), v28, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -2146,7 +2098,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 -; RV32-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v28, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -2155,7 +2107,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -2172,7 +2124,7 @@ ; RV32-NEXT: addi a1, zero, 32 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -2187,11 +2139,11 @@ ; RV64-NEXT: vsext.vf8 v16, v28 ; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v26, (a0), v16, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 
-; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
+; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
 ; RV64-NEXT: vmv1r.v v0, v25
 ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
 ; RV64-NEXT: addi a0, zero, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
--- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
@@ -46,9 +46,8 @@
 ; RV64-1024-NEXT: vslideup.vi v0, v25, 2
 ; RV64-1024-NEXT: vsetivli zero, 4, e64, m1, tu, mu
 ; RV64-1024-NEXT: vslideup.vi v0, v25, 3
-; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-1024-NEXT: vrgather.vv v12, v28, v8, v0.t
-; RV64-1024-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64-1024-NEXT: vse16.v v12, (a0)
 ; RV64-1024-NEXT: ret
 ;
@@ -95,9 +94,8 @@
 ; RV64-2048-NEXT: vslideup.vi v0, v25, 2
 ; RV64-2048-NEXT: vsetivli zero, 4, e64, m1, tu, mu
 ; RV64-2048-NEXT: vslideup.vi v0, v25, 3
-; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, mu
 ; RV64-2048-NEXT: vrgather.vv v30, v26, v28, v0.t
-; RV64-2048-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV64-2048-NEXT: vse16.v v30, (a0)
 ; RV64-2048-NEXT: ret
 entry:
@@ -208,14 +206,13 @@
 ; RV64-1024-NEXT: vslideup.vi v0, v25, 6
 ; RV64-1024-NEXT: vsetivli zero, 8, e64, m1, tu, mu
 ; RV64-1024-NEXT: vslideup.vi v0, v25, 7
-; RV64-1024-NEXT: vsetvli zero, a1, e16, m8, tu, mu
+; RV64-1024-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-1024-NEXT: csrr a1, vlenb
 ; RV64-1024-NEXT: slli a1, a1, 5
 ; RV64-1024-NEXT: add a1, sp, a1
 ; RV64-1024-NEXT: addi a1, a1, 16
 ; RV64-1024-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload
 ; RV64-1024-NEXT: vrgather.vv v8, v16, v24, v0.t
-; RV64-1024-NEXT: vsetvli zero, zero, e16, m8, ta, mu
 ; RV64-1024-NEXT: vse16.v v8, (a0)
 ; RV64-1024-NEXT: csrr a0, vlenb
 ; RV64-1024-NEXT: addi a1, zero, 40
@@ -275,9 +272,8 @@
 ; RV64-2048-NEXT: vslideup.vi v0, v25, 6
 ; RV64-2048-NEXT: vsetivli zero, 8, e64, m1, tu, mu
 ; RV64-2048-NEXT: vslideup.vi v0, v25, 7
-; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-2048-NEXT: vrgather.vv v12, v28, v8, v0.t
-; RV64-2048-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64-2048-NEXT: vse16.v v12, (a0)
 ; RV64-2048-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -9,14 +9,14 @@
 define <vscale x 1 x i8> @mgather_nxv1i8(<vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru) {
 ; RV32-LABEL: mgather_nxv1i8:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, mu
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mgather_nxv1i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, mu
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
 ; RV64-NEXT: vmv1r.v v8, v9
 ; RV64-NEXT: ret
@@ -29,14 +29,14 @@
 define <vscale x 2 x i8> @mgather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
 ; RV32-LABEL: mgather_nxv2i8:
 ; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: mgather_nxv2i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu
+; RV64-NEXT: vsetvli a0, zero, e8, mf4,
ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -47,7 +47,7 @@ define @mgather_nxv2i8_sextload_nxv2i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 @@ -55,7 +55,7 @@ ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 @@ -68,7 +68,7 @@ define @mgather_nxv2i8_zextload_nxv2i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 @@ -76,7 +76,7 @@ ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 @@ -89,7 +89,7 @@ define @mgather_nxv2i8_sextload_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 @@ -97,7 +97,7 @@ ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vsext.vf4 v8, v10 @@ -110,7 +110,7 @@ define @mgather_nxv2i8_zextload_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 @@ -118,7 +118,7 @@ ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vzext.vf4 v8, v10 @@ -131,7 +131,7 @@ define @mgather_nxv2i8_sextload_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf8 v26, v9 @@ -140,7 +140,7 @@ ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf8 v8, v10 @@ -153,7 +153,7 @@ define @mgather_nxv2i8_zextload_nxv2i64( %ptrs, %m, 
%passthru) { ; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf8 v26, v9 @@ -162,7 +162,7 @@ ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf8 v8, v10 @@ -177,14 +177,14 @@ define @mgather_nxv4i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -231,14 +231,14 @@ define @mgather_nxv8i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v16 ; RV64-NEXT: ret @@ -251,7 +251,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 -; RV32-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v28, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret @@ -260,7 +260,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -274,14 +274,14 @@ define @mgather_nxv1i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -294,14 +294,14 @@ define @mgather_nxv2i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -312,7 +312,7 @@ define @mgather_nxv2i16_sextload_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16_sextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli 
a0, zero, e16, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 @@ -320,7 +320,7 @@ ; ; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 @@ -333,7 +333,7 @@ define @mgather_nxv2i16_zextload_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16_zextload_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 @@ -341,7 +341,7 @@ ; ; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 @@ -354,7 +354,7 @@ define @mgather_nxv2i16_sextload_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16_sextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v9 @@ -363,7 +363,7 @@ ; ; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf4 v8, v10 @@ -376,7 +376,7 @@ define @mgather_nxv2i16_zextload_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16_zextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v9 @@ -385,7 +385,7 @@ ; ; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf4 v8, v10 @@ -400,14 +400,14 @@ define @mgather_nxv4i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -454,14 +454,14 @@ define @mgather_nxv8i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, 
m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v16 ; RV64-NEXT: ret @@ -475,7 +475,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -485,7 +485,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -500,7 +500,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -510,7 +510,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -526,7 +526,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -536,7 +536,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -552,7 +552,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -562,7 +562,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -576,14 +576,14 @@ define @mgather_nxv1i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -596,14 +596,14 @@ define @mgather_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mgather_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -614,7 +614,7 @@ define @mgather_nxv2i32_sextload_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i32_sextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v9 @@ -623,7 +623,7 @@ ; ; RV64-LABEL: mgather_nxv2i32_sextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 @@ -636,7 +636,7 @@ define @mgather_nxv2i32_zextload_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i32_zextload_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf2 v26, v9 @@ -645,7 +645,7 @@ ; ; RV64-LABEL: mgather_nxv2i32_zextload_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 @@ -660,14 +660,14 @@ define @mgather_nxv4i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret @@ -713,14 +713,14 @@ define @mgather_nxv8i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v16 ; RV64-NEXT: ret @@ -734,7 +734,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -744,7 +743,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -759,7 +758,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ 
-769,7 +767,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -785,7 +783,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -795,7 +792,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -811,7 +808,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -821,7 +817,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -836,7 +832,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -846,7 +841,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -862,7 +857,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -872,7 +866,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -887,7 +881,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v28, v8, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -897,7 +890,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -911,14 +904,14 @@ define @mgather_nxv1i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v 
v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -931,14 +924,14 @@ define @mgather_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -951,14 +944,14 @@ define @mgather_nxv4i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1004,14 +997,14 @@ define @mgather_nxv8i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1025,7 +1018,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1035,7 +1028,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1050,7 +1042,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1060,7 +1051,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1076,7 +1066,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1086,7 +1075,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v 
v8, v16 ; RV64-NEXT: ret @@ -1102,7 +1090,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1112,7 +1100,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1127,7 +1114,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1137,7 +1123,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1153,7 +1138,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1163,7 +1147,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1178,7 +1161,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v28, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1188,7 +1171,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1203,7 +1185,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1213,7 +1194,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1229,7 +1209,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1239,7 +1218,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1254,7 +1232,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; 
RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1263,7 +1240,6 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1281,13 +1257,13 @@ ; RV32-LABEL: mgather_nxv16i64: ; RV32: # %bb.0: ; RV32-NEXT: vl8re64.v v24, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a2, a0, 3 ; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, mu ; RV32-NEXT: vslidedown.vx v0, v0, a2 -; RV32-NEXT: vsetvli a2, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v24, (zero), v12, v0.t ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, a1, a0 @@ -1307,13 +1283,13 @@ ; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; RV64-NEXT: vmv8r.v v16, v8 ; RV64-NEXT: vl8re64.v v8, (a1) -; RV64-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v24, (zero), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: srli a1, a0, 3 ; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, mu ; RV64-NEXT: vslidedown.vx v0, v0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, tu, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vluxei64.v v8, (zero), v16, v0.t @@ -1343,14 +1319,14 @@ define @mgather_nxv1f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1363,14 +1339,14 @@ define @mgather_nxv2f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -1383,14 +1359,14 @@ define @mgather_nxv4f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -1437,14 +1413,14 @@ define @mgather_nxv8f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v12 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mgather_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v16 ; RV64-NEXT: ret @@ -1458,7 +1434,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1468,7 +1444,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1483,7 +1459,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1493,7 +1469,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1509,7 +1485,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1519,7 +1495,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1535,7 +1511,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vadd.vv v28, v28, v28 -; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -1545,7 +1521,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1559,14 +1535,14 @@ define @mgather_nxv1f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1579,14 +1555,14 @@ define @mgather_nxv2f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, tu, 
mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -1599,14 +1575,14 @@ define @mgather_nxv4f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 ; RV64-NEXT: ret @@ -1652,14 +1628,14 @@ define @mgather_nxv8f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v16 ; RV64-NEXT: ret @@ -1673,7 +1649,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1683,7 +1658,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1698,7 +1673,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1708,7 +1682,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1724,7 +1698,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1734,7 +1707,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1750,7 +1723,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1760,7 
+1732,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1775,7 +1747,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1785,7 +1756,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1801,7 +1772,6 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1811,7 +1781,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1826,7 +1796,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v28, v8, 2 -; RV32-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; RV32-NEXT: vluxei32.v v12, (a0), v28, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -1836,7 +1805,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1850,14 +1819,14 @@ define @mgather_nxv1f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -1870,14 +1839,14 @@ define @mgather_nxv2f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -1890,14 +1859,14 @@ define @mgather_nxv4f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: 
vsetvli a0, zero, e64, m4, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret @@ -1943,14 +1912,14 @@ define @mgather_nxv8f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1964,7 +1933,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1974,7 +1943,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -1989,7 +1957,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -1999,7 +1966,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2015,7 +1981,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2025,7 +1990,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2041,7 +2005,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 ; RV32-NEXT: vsll.vi v28, v28, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2051,7 +2015,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2066,7 +2029,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2076,7 +2038,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ 
-2092,7 +2053,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2102,7 +2062,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2117,7 +2076,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v28, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2127,7 +2086,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2142,7 +2100,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2152,7 +2109,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2168,7 +2124,6 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2178,7 +2133,6 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2193,7 +2147,6 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: ret @@ -2202,7 +2155,6 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, mu ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 ; RV64-NEXT: ret @@ -2218,7 +2170,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: ret @@ -2227,7 +2179,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: srli a1, a1, 3 @@ -2235,7 +2187,7 @@ ; RV64-NEXT: vslidedown.vx v0, v0, a1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v9 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli 
zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 ; RV64-NEXT: ret @@ -2251,7 +2203,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: srli a1, a1, 2 @@ -2259,7 +2211,7 @@ ; RV32-NEXT: vslidedown.vx v0, v0, a1 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vsext.vf4 v16, v10 -; RV32-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v14, (a0), v16, v0.t ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: ret @@ -2269,7 +2221,7 @@ ; RV64-NEXT: vmv1r.v v25, v0 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: srli a2, a1, 3 @@ -2277,20 +2229,20 @@ ; RV64-NEXT: vslidedown.vx v0, v0, a2 ; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v9 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v13, (a0), v16, v0.t ; RV64-NEXT: srli a1, a1, 2 ; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, mu ; RV64-NEXT: vslidedown.vx v0, v25, a1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t ; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; RV64-NEXT: vslidedown.vx v0, v0, a2 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v11 -; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir @@ -52,7 +52,7 @@ ; CHECK: $v0 = COPY [[COPY]] ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]] - ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6 :: (load (s512) from %ir.a, align 8) + ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6, 1 :: (load (s512) from %ir.a, align 8) ; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]] ; CHECK: PseudoRET implicit $v8m8 %1:vr = COPY $v0 @@ -60,7 +60,7 @@ $v0 = COPY %1 %3:vrm8 = IMPLICIT_DEF %4:vrm8nov0 = COPY %3 - %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6 :: (load (s512) from %ir.a, align 8) + %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6, 1 :: (load (s512) from %ir.a, align 8) $v8m8 = COPY %2 PseudoRET implicit $v8m8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define 
@intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, 
v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); 
define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, 
v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define 
@intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( 
%0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret 
%a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) 
+ i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 
+1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define 
@intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 
@@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define 
@intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vaaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; 
CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
@@ -1760,7 +1800,7 @@
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 1)

  ret <vscale x 16 x i32> %a
}
@@ -1790,12 +1830,13 @@
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
+  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
@@ -1804,7 +1845,7 @@
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 1)

  ret <vscale x 1 x i64> %a
}
@@ -1834,12 +1875,13 @@
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
+  i64,
  i64);

define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
@@ -1848,7 +1890,7 @@
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 1)

  ret <vscale x 2 x i64> %a
}
@@ -1878,12 +1920,13 @@
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
+  i64,
  i64);

define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
@@ -1892,7 +1935,7 @@
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 1)

  ret <vscale x 4 x i64> %a
}
@@ -1922,12 +1965,13 @@
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
+  i64,
  i64);

define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
@@ -1936,7 +1980,7 @@
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 1)

  ret <vscale x 8 x i64> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64, i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_tu(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_tu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 0)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_ta(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_ta:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = 
call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -26,12 +26,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +40,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +70,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +114,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +128,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +158,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +172,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +202,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +216,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +246,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +260,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +290,13 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +305,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +335,12 @@ , , , - i32); + i32, 
i32); define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +379,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +393,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +423,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +437,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +467,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +511,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +525,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +555,13 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +600,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +614,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +644,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +688,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +702,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +732,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +776,13 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +791,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +821,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +835,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +865,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +879,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +909,12 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +953,13 @@ , , , - i32); + i32, i32); define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +968,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +998,12 @@ , i8, , - i32); + i32, i32); define 
@intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1012,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1042,12 @@ , i8, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1056,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1086,12 @@ , i8, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1100,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1130,12 @@ , i8, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1144,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1174,12 @@ , i8, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1218,12 @@ , i8, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1232,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1262,12 @@ , i8, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1276,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1306,12 @@ , i16, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ 
-1320,7 +1320,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1350,12 @@ , i16, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1364,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1394,12 @@ , i16, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1408,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1438,12 @@ , i16, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1452,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1482,12 @@ , i16, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1496,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1526,12 @@ , i16, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1540,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1570,12 @@ , i32, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1584,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1614,12 @@ , i32, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1628,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1658,12 @@ , i32, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1702,12 @@ , i32, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1716,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1746,12 @@ , i32, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1760,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,7 +1796,7 @@ , i64, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64: @@ -1804,10 +1804,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1817,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,7 +1853,7 @@ , i64, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64: @@ -1861,10 +1861,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1874,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,7 +1910,7 @@ , i64, , - i32); + i32, i32); define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64: @@ -1918,10 +1918,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1931,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,7 +1967,7 @@ , i64, , - i32); + i32, i32); define 
@intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64: @@ -1975,10 +1975,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +1988,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2011,7 @@ define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2020,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2043,7 @@ define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2052,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2075,7 @@ define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2084,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2107,7 @@ define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2116,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2139,7 @@ define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2148,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2171,7 @@ define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2180,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2203,7 @@ define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret 
entry: @@ -2212,7 +2212,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2235,7 +2235,7 @@ define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2244,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2267,7 @@ define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2299,7 @@ define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2308,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2331,7 @@ define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2363,7 @@ define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2372,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2395,7 @@ define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2404,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2427,7 @@ define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2436,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2459,7 @@ define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2468,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2491,7 @@ define 
@intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2500,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2523,7 @@ define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2532,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2555,7 @@ define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2564,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2587,7 +2587,7 @@ define @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2596,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2619,7 @@ define @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2628,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2651,7 @@ define @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2660,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2683,7 @@ define @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2692,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; 
CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define 
@intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - 
i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; 
CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ 
define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 
@@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, 
, + i32, i32); define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, 
v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); 
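For reference, the shape these masked tests take after the change, with the scalable-vector types written out explicitly, is sketched below. This is an illustrative reconstruction only: the intrinsic name mangling and the nxv1i8 element type are inferred from the test names (e.g. intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8), not copied verbatim from the file, so treat it as a sketch of the operand layout rather than the exact test text.

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(   ; name mangling assumed
  <vscale x 1 x i8>,    ; merge (maskedoff) operand
  <vscale x 1 x i8>,    ; op1
  <vscale x 1 x i8>,    ; op2
  <vscale x 1 x i1>,    ; mask
  i32,                  ; vl
  i32);                 ; newly added policy operand (constant)

define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(
    <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)      ; trailing i32 1 is the policy argument added by this patch
  ret <vscale x 1 x i8> %a
}

The constant policy operand each call site gains here is what the updated CHECK lines reflect when the expected vsetvli switches from "tu, mu" to "ta, mu".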
define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2055,7 @@ define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2064,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2087,7 @@ define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2096,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2119,7 @@ define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2128,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2151,7 @@ define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2160,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2183,7 @@ define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2192,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2215,7 @@ define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2224,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2247,7 @@ define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2212,7 +2256,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) 
ret %a } @@ -2235,7 +2279,7 @@ define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2288,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2311,7 @@ define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2320,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2343,7 @@ define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2352,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2375,7 @@ define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2384,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2407,7 @@ define @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2416,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2439,7 @@ define @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2448,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2471,7 @@ define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2480,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2503,7 @@ define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2512,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2535,7 @@ define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2544,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2567,7 @@ define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2599,7 @@ define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2608,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2587,7 +2631,7 @@ define @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2640,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2663,7 @@ define @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2672,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2695,7 @@ define @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2704,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2727,7 @@ define @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2736,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define 
@intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - 
i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define 
@intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: 
ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, 
%2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define 
@intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; 
CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + 
i32, i32); define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: 
vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ 
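; ----------------------------------------------------------------------------
; Illustrative sketch (not part of the autogenerated checks): on RV32 an i64
; scalar operand cannot be passed in a single GPR, so these tests expect the
; value to be spilled to the stack and read back into a vector register with a
; zero-strided vlse64.v, after which the masked operation is emitted in its
; .vv form; the operation itself now runs under a tail-agnostic vsetvli
; because the call passes the trailing policy immediate 1.  The IR below shows
; the call shape that drives this lowering for one element type; the intrinsic
; name mangling and value names are illustrative, not taken from this diff.
declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i32(
  <vscale x 1 x i64>,   ; merge (maskedoff)
  <vscale x 1 x i64>,   ; op1
  i64,                  ; op2: 64-bit scalar, split across two GPRs on RV32
  <vscale x 1 x i1>,    ; mask
  i32,                  ; vl (XLen is i32 on RV32)
  i32)                  ; policy, must be an immediate; 1 = tail agnostic

define <vscale x 1 x i64> @sketch_vasub_mask_vx_i64_rv32(<vscale x 1 x i64> %merge, <vscale x 1 x i64> %op1, i64 %s, <vscale x 1 x i1> %m, i32 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i32(
      <vscale x 1 x i64> %merge, <vscale x 1 x i64> %op1, i64 %s,
      <vscale x 1 x i1> %m, i32 %vl, i32 1)
  ret <vscale x 1 x i64> %a
}
; ----------------------------------------------------------------------------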
-1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, 
i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, 
mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ 
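; ----------------------------------------------------------------------------
; Illustrative sketch (not part of the autogenerated checks): the masked
; vasub/vasubu intrinsics now take a trailing XLen policy operand after the VL
; operand, and every call in these tests passes the immediate 1 (tail
; agnostic), which is why the expected vsetvli strings change from "tu, mu" to
; "ta, mu".  The declaration and call below show the intended operand order
; for one element type; the intrinsic name mangling and value names are
; illustrative, not taken from this diff.
declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(
  <vscale x 1 x i8>,    ; merge (maskedoff)
  <vscale x 1 x i8>,    ; op1
  i8,                   ; op2 (scalar for the .vx form)
  <vscale x 1 x i1>,    ; mask
  i64,                  ; vl (XLen is i64 on RV64)
  i64)                  ; policy, must be an immediate; 1 = tail agnostic

define <vscale x 1 x i8> @sketch_vasub_mask_vx(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %op1, i8 %s, <vscale x 1 x i1> %m, i64 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(
      <vscale x 1 x i8> %merge, <vscale x 1 x i8> %op1, i8 %s,
      <vscale x 1 x i1> %m, i64 %vl, i64 1)
  ret <vscale x 1 x i8> %a
}
; ----------------------------------------------------------------------------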
-1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vasub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vasub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vasub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ 
-1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define 
@intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, 
%3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, 
%2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ 
-658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define 
@intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; 
CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vasubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vasubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vasubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vasubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 
%4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, 
v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define 
@intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - 
i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define 
@intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - 
i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define 
@intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: 
ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define 
@intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret 
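; Note on the shape of these updated masked intrinsics: each declaration gains one
; trailing XLen-sized operand (an immediate policy argument), and every updated call
; site passes the constant 1, which selects the tail-agnostic policy; that is why the
; generated vsetvli CHECK lines above change from "tu, mu" to "ta, mu". A minimal
; sketch of the resulting signature, using an assumed mangled intrinsic name purely
; for illustration, looks like:
;
;   declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
;     <vscale x 8 x i16>,   ; maskedoff
;     <vscale x 8 x i16>,   ; op1
;     <vscale x 8 x i16>,   ; op2
;     <vscale x 8 x i1>,    ; mask
;     i32,                  ; vl
;     i32)                  ; policy immediate (1 = tail agnostic)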
entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , 
+ i32, i32); define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v12, a0, 
v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 
1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + 
i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, 
ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ 
-1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; 
CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, 
i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, 
a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; 
CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -71,12 +72,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -85,7 +87,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -115,12 +117,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -129,7 +132,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -159,12 +162,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -203,12 +207,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -217,7 +222,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -247,13 +252,14 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -262,7 +268,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -292,12 +298,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -306,7 +313,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -336,12 +343,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -350,7 +358,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -380,12 +388,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -394,7 +403,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -424,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -438,7 +448,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -468,13 +478,14 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -483,7 +494,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -513,12 +524,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( 
%0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -527,7 +539,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -557,12 +569,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -571,7 +584,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -601,12 +614,13 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -615,7 +629,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -645,13 +659,14 @@ , , , + i64, i64); define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -660,7 +675,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -691,13 +706,14 @@ , half, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -706,7 +722,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -737,13 +753,14 @@ , half, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -752,7 +769,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -783,13 +800,14 @@ , half, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -798,7 +816,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -829,13 +847,14 @@ , half, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -844,7 +863,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -875,13 +894,14 @@ , half, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -890,7 +910,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -921,13 +941,14 @@ , half, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -936,7 +957,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -967,13 +988,14 @@ , float, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -982,7 +1004,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1013,13 +1035,14 @@ , float, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1028,7 +1051,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1059,13 +1082,14 @@ , float, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1074,7 +1098,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1105,13 +1129,14 @@ , float, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1120,7 +1145,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1151,13 +1176,14 @@ , float, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: 
vfadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,13 +1223,14 @@ , double, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1212,7 +1239,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1243,13 +1270,14 @@ , double, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1258,7 +1286,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1289,13 +1317,14 @@ , double, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1304,7 +1333,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1335,13 +1364,14 @@ , double, , + i64, i64); define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1350,7 +1380,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: 
vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, 
v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 
+160,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: 
@@ -476,7 +488,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, 
mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ 
-196,7 +201,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, 
ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret 
%a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v 
v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , 
+ i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret 
entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ 
%0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i64 %3) + 
i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) 
ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) 
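; For reference, the shape these vfcvt mask tests now exercise, written out with
; the scalable vector types implied by the mangled test names (a readability
; sketch only; the exact intrinsic name mangling in the checked-in files may
; differ slightly):
;
;   declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64(
;     <vscale x 1 x i16>,   ; merge (maskedoff) vector
;     <vscale x 1 x half>,  ; source vector
;     <vscale x 1 x i1>,    ; mask
;     i64,                  ; vl
;     i64)                  ; new immediate policy operand
;
;   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64(
;            <vscale x 1 x i16> %0, <vscale x 1 x half> %1,
;            <vscale x 1 x i1> %2, i64 %3, i64 1)
;
; Passing 1 (tail agnostic) for the trailing policy operand is what lets the
; expected vsetvli in the CHECK lines switch from "tu, mu" to "ta, mu".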
ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ 
-659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ 
, float, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + 
i32, i32); define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret 
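; The vfdiv mask tests follow the same pattern for binary ops: the masked
; intrinsic gains a trailing XLEN-sized immediate policy operand after vl.
; Sketch of the vector-scalar form, with types read off the mangled test names
; (illustrative only; exact name mangling may differ in the checked-in files):
;
;   declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64(
;     <vscale x 1 x half>,  ; merge (maskedoff) vector
;     <vscale x 1 x half>,  ; vector operand
;     half,                 ; scalar operand (splatted)
;     <vscale x 1 x i1>,    ; mask
;     i64,                  ; vl
;     i64)                  ; policy immediate; these tests pass 1 (tail agnostic)
;
; The .vv variants are identical except that the scalar operand is replaced by a
; second vector of the same type, and every expected vsetvli correspondingly
; moves from "tu, mu" to "ta, mu".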
entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define 
@intrinsic_vfdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ 
-644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( 
%0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 
%4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 
%4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, 
%1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, 
- i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { 
; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; 
CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 
+493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } 
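Every hunk in this long run of regenerated tests applies the same mechanical change: each masked intrinsic declaration gains a trailing XLen-typed policy operand, every call site passes the constant 1 (tail agnostic), and the expected vsetvli correspondingly switches from tu, mu to ta, mu. As a reading aid, a minimal sketch of the post-patch form of one such test is reconstructed below; the <vscale x 1 x half> type spellings and the mangled intrinsic name are inferred from the test name intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16 (this extracted copy of the diff has dropped the angle-bracketed types), so treat them as assumptions rather than verbatim patch text.

; Reconstructed sketch (assumed type spellings and name mangling), not verbatim patch content.
; The trailing i32 is the new policy operand; passing the immediate 1 requests tail agnostic,
; which is why the CHECK lines now expect "ta" instead of "tu" in the vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i32(
  <vscale x 1 x half>,  ; maskedoff (merge value for masked-off elements)
  <vscale x 1 x half>,  ; vs2
  <vscale x 1 x half>,  ; vs1
  <vscale x 1 x i1>,    ; mask
  i32,                  ; vl
  i32);                 ; policy operand added by this patch (immediate)

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:  vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:  vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:  ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)
  ret <vscale x 1 x half> %a
}

The remaining hunks differ only in element type and LMUL, in the scalar (.vf) form where the splat value arrives through fmv/fld, and in using i64 for the rv64 variants.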
@@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, 
a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ 
-782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32: ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define @intrinsic_vfmul_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12, 
v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t ; 
CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: 
vfncvt.f.xu.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define 
@intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ 
%0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define 
@intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define 
@intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ , , , + i64, i64); define 
@intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ 
, , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ 
, , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , 
, , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 
@@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a 
} @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i64 
%3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll @@ -27,13 +27,14 @@ , half, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +43,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -73,13 +74,14 @@ , half, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -88,7 +90,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -119,13 +121,14 @@ , half, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -134,7 +137,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -165,13 +168,14 @@ , half, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -180,7 +184,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -211,13 +215,14 @@ , half, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +231,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -257,13 +262,14 @@ , half, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t 
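For readability, the pattern the masked vfncvt.xu.f.w hunks above apply is written out below as a minimal sketch: the intrinsic gains a trailing policy operand next to vl, every call site passes 1 (tail agnostic), and that is what lets the CHECK lines change from "tu, mu" to "ta, mu". The scalable vector types did not survive in the flattened hunks, so the <vscale x 1 x i16> / <vscale x 1 x float> types, the mangled intrinsic name, and the wrapper name are inferred from the test label and should be read as assumptions, not as lines of the actual patch.

; Sketch only -- types and names reconstructed from the test label
; intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32.
declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
  <vscale x 1 x i16>,        ; merge (maskedoff)
  <vscale x 1 x float>,      ; wide source
  <vscale x 1 x i1>,         ; mask
  i64,                       ; vl
  i64);                      ; new policy operand

define <vscale x 1 x i16> @sketch_vfncvt_mask_xu_f_w(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
entry:
  ; policy = 1 selects tail agnostic, matching the "ta, mu" now expected in the vsetvli.
  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    i64 %3, i64 1)
  ret <vscale x 1 x i16> %a
}

The remaining vfncvt cases in the hunks above differ only in the vector types and in the LMUL encoded in the vsetvli.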
; CHECK-NEXT: ret entry: @@ -272,7 +278,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -303,13 +309,14 @@ , float, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -318,7 +325,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -349,13 +356,14 @@ , float, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -364,7 +372,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -395,13 +403,14 @@ , float, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -410,7 +419,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -441,13 +450,14 @@ , float, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -456,7 +466,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -487,13 +497,14 @@ , float, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -502,7 +513,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -537,6 +548,7 @@ , double, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -546,7 +558,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -556,7 +568,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -591,6 +603,7 @@ , double, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -600,7 +613,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -610,7 
+623,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -645,6 +658,7 @@ , double, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -654,7 +668,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -664,7 +678,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -699,6 +713,7 @@ , double, , + i32, i32); define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -708,7 +723,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -718,7 +733,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll @@ -27,13 +27,14 @@ , half, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +43,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -73,13 +74,14 @@ , half, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -88,7 +90,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -119,13 +121,14 @@ , half, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -134,7 +137,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -165,13 +168,14 @@ , half, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -180,7 +184,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -211,13 +215,14 @@ , half, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: 
vfrdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +231,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -257,13 +262,14 @@ , half, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -272,7 +278,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -303,13 +309,14 @@ , float, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -318,7 +325,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -349,13 +356,14 @@ , float, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -364,7 +372,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -395,13 +403,14 @@ , float, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -410,7 +419,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -441,13 +450,14 @@ , float, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -456,7 +466,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -487,13 +497,14 @@ , float, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -502,7 +513,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -533,13 +544,14 @@ , double, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -548,7 +560,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -579,13 +591,14 @@ , double, , + i64, 
i64); define @intrinsic_vfrdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -594,7 +607,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -625,13 +638,14 @@ , double, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -640,7 +654,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -671,13 +685,14 @@ , double, , + i64, i64); define @intrinsic_vfrdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -686,7 +701,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; 
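The vfrdiv .vf hunks (and the vfrsub ones later in this patch) follow the same shape for a masked vector-scalar operation. Below is a minimal sketch of the RV32 nxv1f16 case, with the half / <vscale x 1 x half> types, the intrinsic mangling, and the wrapper name reconstructed as assumptions from the test labels rather than copied from the patch.

; Sketch only -- reconstructed, not copied from the patch.
declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>,       ; merge
  <vscale x 1 x half>,       ; vector operand
  half,                      ; scalar operand
  <vscale x 1 x i1>,         ; mask
  i32,                       ; vl (XLen is i32 on RV32)
  i32);                      ; new policy operand

define <vscale x 1 x half> @sketch_vfrdiv_mask_vf(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; Expected codegen per the hunks: the scalar is moved into ft0 and the vsetvli
; now requests "ta, mu":
;   fmv.h.x ft0, a0
;   vsetvli zero, a1, e16, mf4, ta, mu
;   vfrdiv.vf v8, v9, ft0, v0.t
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)
  ret <vscale x 1 x half> %a
}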
CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfrec7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfrec7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, 
mu ; CHECK-NEXT: vfrec7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfrec7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfrec7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfrec7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfrec7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfrec7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, 
m4, ta, mu ; CHECK-NEXT: vfrec7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfrec7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -223,12 +228,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -263,12 +269,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
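The masked unary tests (vfrec7 above, vfrsqrt7 from here on) pass their operands in a different order than the binary forms: the wrapper receives the mask first, and the call lists merge, source, mask, vl, and now the policy. A minimal sketch of the RV64 nxv1f16 vfrec7 case follows, with the types and the mangled intrinsic name reconstructed as assumptions from the test label.

; Sketch only -- reconstructed from the label intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16.
declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
  <vscale x 1 x half>,       ; merge
  <vscale x 1 x half>,       ; source
  <vscale x 1 x i1>,         ; mask
  i64,                       ; vl
  i64);                      ; new policy operand

define <vscale x 1 x half> @sketch_vfrec7_mask(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
entry:
  ; Same argument order as the hunks: %1 merge, %2 source, %0 mask, then vl and
  ; the tail-agnostic policy.
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %0,
    i64 %3, i64 1)
  ret <vscale x 1 x half> %a
}

The vfrsqrt7 tests that follow are identical apart from the opcode in the CHECK lines.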
intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -303,12 +310,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -343,12 +351,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -383,12 +392,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -423,12 +433,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -463,12 +474,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -503,12 +515,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -543,12 +556,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -583,12 +597,13 @@ , , , + i32, i32); define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll @@ -23,12 +23,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -63,12 +64,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -103,12 +105,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -143,12 +146,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -183,12 +187,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -223,12 +228,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -236,7 +242,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -263,12 +269,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -276,7 +283,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -303,12 +310,13 @@ , 
, , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -316,7 +324,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -343,12 +351,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +365,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -383,12 +392,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +406,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -423,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +447,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -463,12 +474,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -476,7 +488,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -503,12 +515,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +529,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -543,12 +556,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -556,7 +570,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -583,12 +597,13 @@ , , , + i64, i64); define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -596,7 +611,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll @@ -27,13 +27,14 @@ , half, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +43,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -73,13 +74,14 @@ , half, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -88,7 +90,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -119,13 +121,14 @@ , half, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -134,7 +137,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -165,13 +168,14 @@ , half, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -180,7 +184,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -211,13 +215,14 @@ , half, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +231,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -257,13 +262,14 @@ , half, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -272,7 +278,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -303,13 +309,14 @@ , float, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ 
-318,7 +325,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -349,13 +356,14 @@ , float, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -364,7 +372,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -395,13 +403,14 @@ , float, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -410,7 +419,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -441,13 +450,14 @@ , float, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -456,7 +466,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -487,13 +497,14 @@ , float, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -502,7 +513,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -537,6 +548,7 @@ , double, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -546,7 +558,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -556,7 +568,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -591,6 +603,7 @@ , double, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -600,7 +613,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -610,7 +623,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -645,6 +658,7 @@ , double, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -654,7 +668,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -664,7 +678,7 @@ %1, double 
%2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -699,6 +713,7 @@ , double, , + i32, i32); define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -708,7 +723,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -718,7 +733,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll @@ -28,13 +28,14 @@ , half, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -74,13 +75,14 @@ , half, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -89,7 +91,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -120,13 +122,14 @@ , half, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -135,7 +138,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -166,13 +169,14 @@ , half, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -181,7 +185,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -212,13 +216,14 @@ , half, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -227,7 +232,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -258,13 +263,14 @@ , half, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: 
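One detail worth noting in the RV32 f64 .vf hunks above (vfrdiv and vfrsub): the double scalar arrives split across a0/a1, is spilled and reloaded through the stack as a double, and vl therefore sits in a2; the only functional change in these tests is still the appended policy operand and the tu -> ta flip. A compact sketch, with types and names again reconstructed as assumptions:

; Sketch only -- abridged to the codegen lines visible in the hunks.
declare <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
  <vscale x 1 x double>, <vscale x 1 x double>, double, <vscale x 1 x i1>, i32, i32);

define <vscale x 1 x double> @sketch_vfrsub_mask_vf_f64_rv32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; Expected codegen (abridged):
;   sw a0, 8(sp)
;   sw a1, 12(sp)
;   fld ft0, 8(sp)
;   vsetvli zero, a2, e64, m1, ta, mu
;   vfrsub.vf v8, v9, ft0, v0.t
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)
  ret <vscale x 1 x double> %a
}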
vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -273,7 +279,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -304,13 +310,14 @@ , float, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -319,7 +326,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -350,13 +357,14 @@ , float, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +373,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -396,13 +404,14 @@ , float, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -411,7 +420,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -442,13 +451,14 @@ , float, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -457,7 +467,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -488,13 +498,14 @@ , float, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -503,7 +514,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -534,13 +545,14 @@ , double, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -549,7 +561,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -580,13 +592,14 @@ , double, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: 
vfrsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -595,7 +608,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -626,13 +639,14 @@ , double, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -641,7 +655,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -672,13 +686,14 @@ , double, , + i64, i64); define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -687,7 +702,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t ; CHECK-NEXT: 
ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, 
ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; 
CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define 
@intrinsic_vfsgnj_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnj_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, 
i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define 
@intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, 
i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret 
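; A minimal illustrative sketch (separate from the tests above) of the masked call
; form these tests now exercise; the mangled intrinsic name and the value names
; (%merge, %lhs, %rhs, %mask, %vl) are assumptions for illustration, not taken from
; the patch. The only change to the call is the trailing XLen policy operand: the
; updated tests pass i64 1 (tail agnostic), which lines up with the CHECK lines
; switching from "tu, mu" to "ta, mu" in the generated vsetvli.
declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(
  <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>,
  <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x float> @policy_operand_sketch(<vscale x 4 x float> %merge,
                                                   <vscale x 4 x float> %lhs,
                                                   <vscale x 4 x float> %rhs,
                                                   <vscale x 4 x i1> %mask,
                                                   i64 %vl) nounwind {
entry:
  ; Operand order as in the tests: merge value, the two sources, the mask, the
  ; vector length, and then the new policy immediate (i64 1 here).
  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(
    <vscale x 4 x float> %merge,
    <vscale x 4 x float> %lhs,
    <vscale x 4 x float> %rhs,
    <vscale x 4 x i1> %mask,
    i64 %vl, i64 1)
  ret <vscale x 4 x float> %a
}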
entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: 
vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: 
vfsgnjx.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define 
@intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: 
ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i64, i64); define 
@intrinsic_vfsgnjx_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1196,13 +1222,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,13 +1269,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1257,7 +1285,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1288,13 +1316,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1303,7 +1332,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1334,13 +1363,14 @@ , double, , + i64, i64); define @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1349,7 +1379,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll @@ -27,13 +27,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +43,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -73,13 +74,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -88,7 +90,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -119,13 +121,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -134,7 +137,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -165,13 +168,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -180,7 +184,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -211,13 +215,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +231,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -257,13 +262,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -272,7 +278,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -303,13 +309,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -318,7 +325,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -349,13 +356,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -364,7 +372,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -395,13 +403,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -410,7 +419,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -441,13 +450,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -456,7 +466,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -487,13 +497,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -502,7 +513,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -537,6 +548,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -546,7 +558,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -556,7 +568,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -591,6 +603,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -600,7 +613,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -610,7 +623,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -645,6 +658,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -654,7 +668,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, 
m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -664,7 +678,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -699,6 +713,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -708,7 +723,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -718,7 +733,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll @@ -27,13 +27,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +43,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -73,13 +74,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -88,7 +90,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -119,13 +121,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -134,7 +137,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -165,13 +168,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -180,7 +184,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -211,13 +215,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +231,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -257,13 
+262,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -272,7 +278,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -303,13 +309,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -318,7 +325,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -349,13 +356,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -364,7 +372,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -395,13 +403,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -410,7 +419,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -441,13 +450,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -456,7 +466,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -487,13 +497,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -502,7 +513,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -533,13 +544,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -548,7 
+560,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -579,13 +591,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -594,7 +607,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -625,13 +638,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -640,7 +654,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -671,13 +685,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -686,7 +701,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll @@ -28,13 +28,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -75,13 +76,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -122,13 +124,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -169,13 +172,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -216,13 +220,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -263,13 +268,14 @@ , half, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -310,13 +316,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -325,7 +332,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -357,13 +364,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -372,7 +380,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -404,13 +412,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -419,7 +428,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -451,13 +460,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -466,7 +476,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -498,13 +508,14 @@ , float, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -513,7 +524,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -549,6 +560,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -558,7 +570,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -568,7 +580,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -604,6 +616,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -613,7 +626,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -623,7 +636,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -659,6 +672,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -668,7 +682,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -678,7 +692,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -714,6 +728,7 @@ , double, , + i32, i32); define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -723,7 +738,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -733,7 +748,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll @@ -28,13 +28,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -43,7 +44,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -75,13 +76,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -90,7 +92,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -122,13 +124,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +140,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -169,13 +172,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -184,7 +188,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -216,13 +220,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -231,7 +236,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -263,13 +268,14 @@ , half, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -278,7 +284,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -310,13 +316,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -325,7 +332,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -357,13 +364,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -372,7 +380,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -404,13 +412,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu 
; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -419,7 +428,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -451,13 +460,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -466,7 +476,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -498,13 +508,14 @@ , float, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -513,7 +524,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -545,13 +556,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -560,7 +572,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -592,13 +604,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -607,7 +620,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -639,13 +652,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +668,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -686,13 +700,14 @@ , double, , + i64, i64); define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -701,7 +716,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll @@ -23,12 +23,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -36,7 +37,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -63,12 +64,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -76,7 +78,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -103,12 +105,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -116,7 +119,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -143,12 +146,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -156,7 +160,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -183,12 +187,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +201,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -241,12 +246,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -254,7 +260,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -281,12 +287,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -294,7 +301,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -321,12 +328,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +342,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -361,12 +369,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -374,7 +383,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -419,12 +428,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -432,7 +442,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -459,12 +469,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +483,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -499,12 +510,13 @@ , , , + i32, i32); define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -512,7 +524,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll @@ -25,12 +25,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret %0, @@ -42,7 +43,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -71,12 +72,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret %0, @@ -88,7 +90,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret %0, @@ -134,7 +137,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -163,12 +166,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret %0, @@ -180,7 +184,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -209,12 +213,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret %0, @@ -226,7 +231,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -255,12 +260,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret %0, @@ -272,7 +278,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -301,12 +307,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret %0, @@ -318,7 +325,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -347,12 +354,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret %0, @@ -364,7 +372,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +401,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret %0, @@ -410,7 +419,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -439,12 +448,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret %0, @@ -456,7 +466,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -485,12 +495,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret %0, @@ -502,7 +513,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -531,12 +542,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v9, v0.t ; CHECK-NEXT: ret %0, @@ -548,7 +560,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -577,12 +589,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v10, v0.t ; CHECK-NEXT: ret %0, @@ -594,7 +607,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -623,12 +636,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, 
mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v12, v0.t ; CHECK-NEXT: ret %0, @@ -640,7 +654,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -669,12 +683,13 @@ , , , + i64, i64); define @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64( ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v16, v0.t ; CHECK-NEXT: ret %0, @@ -686,7 +701,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } 
@@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, 
ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -690,13 +705,14 @@ , half, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -705,7 +721,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -736,13 +752,14 @@ , half, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -751,7 +768,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -782,13 +799,14 @@ , half, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,13 +846,14 @@ , half, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -843,7 +862,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -874,13 +893,14 @@ , half, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -889,7 +909,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,13 +940,14 @@ , half, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +956,7 @@ %1, 
half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -966,13 +987,14 @@ , float, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -981,7 +1003,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1012,13 +1034,14 @@ , float, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,13 +1081,14 @@ , float, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1073,7 +1097,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1104,13 +1128,14 @@ , float, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1119,7 +1144,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1150,13 +1175,14 @@ , float, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1165,7 +1191,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1200,6 +1226,7 @@ , double, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1209,7 +1236,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1219,7 +1246,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1254,6 +1281,7 @@ , double, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1263,7 +1291,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1273,7 +1301,7 @@ %1, double %2, %3, - i32 %4) + i32 
%4, i32 1) ret %a } @@ -1308,6 +1336,7 @@ , double, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1317,7 +1346,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1327,7 +1356,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1362,6 +1391,7 @@ , double, , + i32, i32); define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { @@ -1371,7 +1401,7 @@ ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1381,7 +1411,7 @@ %1, double %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -71,12 +72,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -85,7 +87,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -115,12 +117,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -129,7 +132,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -159,12 +162,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -203,12 +207,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -217,7 +222,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -247,13 +252,14 @@ , , , + 
i64, i64); define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -262,7 +268,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -292,12 +298,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -306,7 +313,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -336,12 +343,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -350,7 +358,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -380,12 +388,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -394,7 +403,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -424,12 +433,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -438,7 +448,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -468,13 +478,14 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -483,7 +494,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -513,12 +524,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -527,7 +539,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -557,12 +569,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -571,7 +584,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -601,12 +614,13 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -615,7 +629,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -645,13 +659,14 @@ , , , + i64, i64); define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -660,7 +675,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -691,13 +706,14 @@ , half, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -706,7 +722,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -737,13 +753,14 @@ , half, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -752,7 +769,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -783,13 +800,14 @@ , half, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -798,7 +816,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -829,13 +847,14 @@ , half, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -844,7 +863,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -875,13 +894,14 @@ , half, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -890,7 +910,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, 
i64 1) ret %a } @@ -921,13 +941,14 @@ , half, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -936,7 +957,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -967,13 +988,14 @@ , float, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -982,7 +1004,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1013,13 +1035,14 @@ , float, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1028,7 +1051,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1059,13 +1082,14 @@ , float, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1074,7 +1098,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1105,13 +1129,14 @@ , float, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1120,7 +1145,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1151,13 +1176,14 @@ , float, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,13 +1223,14 @@ , double, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1212,7 +1239,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1243,13 +1270,14 @@ , double, , + 
i64, i64); define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1258,7 +1286,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1289,13 +1317,14 @@ , double, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1304,7 +1333,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1335,13 +1364,14 @@ , double, , + i64, i64); define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1350,7 +1380,7 @@ %1, double %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define 
@intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -433,13 +442,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +458,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -480,13 +490,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +506,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -527,13 +538,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -542,7 +554,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -574,13 +586,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -589,7 +602,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -621,13 +634,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -668,13 +682,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -715,13 +730,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -762,13 +778,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -809,13 +826,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 
@@ , , , + i64, i64); define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -433,13 +442,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +458,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -480,13 +490,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +506,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -527,13 +538,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -542,7 +554,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -574,13 +586,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -589,7 +602,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -621,13 +634,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -668,13 +682,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -715,13 +730,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -762,13 +778,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -809,13 +826,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,13 +206,14 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -217,7 +222,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -247,12 +252,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,13 +387,14 @@ , , , + i32, i32); define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -394,7 +403,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -425,13 +434,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -440,7 +450,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -471,13 +481,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -517,13 +528,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +544,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -563,13 +575,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -578,7 +591,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -609,13 +622,14 @@ , half, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -655,13 +669,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -670,7 +685,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -701,13 +716,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,13 +763,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -762,7 +779,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -793,13 +810,14 @@ , float, , + i32, i32); define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -808,7 +826,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -816,7 +834,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu 
; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -825,7 +843,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -833,7 +851,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +860,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -850,7 +868,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -859,7 +877,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -867,7 +885,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +894,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -884,7 +902,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -893,7 +911,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -901,7 +919,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -910,7 +928,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -918,7 +936,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -927,7 +945,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -935,7 +953,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +962,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -952,7 +970,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -961,7 +979,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -970,7 +988,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -979,7 +997,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -988,7 +1006,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1015,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1006,7 +1024,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1015,7 +1033,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1024,7 +1042,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1033,7 +1051,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1042,7 +1060,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1051,7 +1069,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1060,7 +1078,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1069,7 +1087,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1078,7 +1096,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1087,7 +1105,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1096,7 +1114,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1105,7 +1123,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1114,7 +1132,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret 
entry: @@ -1123,7 +1141,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,13 +206,14 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -217,7 +222,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -247,12 +252,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 
%4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,13 +387,14 @@ , , , + i64, i64); define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -394,7 +403,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -425,13 +434,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -440,7 +450,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -471,13 +481,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -517,13 +528,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +544,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -563,13 +575,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -578,7 +591,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -609,13 +622,14 @@ , half, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -655,13 +669,14 @@ , float, , + i64, i64); define 
@intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -670,7 +685,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -701,13 +716,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,13 +763,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -762,7 +779,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -793,13 +810,14 @@ , float, , + i64, i64); define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -808,7 +826,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -816,7 +834,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -825,7 +843,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -833,7 +851,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +860,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -850,7 +868,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -859,7 +877,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -867,7 +885,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +894,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -884,7 +902,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -893,7 +911,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -901,7 +919,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -910,7 +928,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -918,7 +936,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -927,7 +945,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -935,7 +953,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +962,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -952,7 +970,7 @@ define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -961,7 +979,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -970,7 +988,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -979,7 +997,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -988,7 +1006,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1015,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1006,7 +1024,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1015,7 +1033,7 @@ %0, half %1, %2, - i64 
%3) + i64 %3, i64 1) ret %a } @@ -1024,7 +1042,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1033,7 +1051,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1042,7 +1060,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1051,7 +1069,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1060,7 +1078,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1069,7 +1087,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1078,7 +1096,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1087,7 +1105,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1096,7 +1114,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1105,7 +1123,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1114,7 +1132,7 @@ ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1123,7 +1141,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, 
%2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: 
vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: 
vfwcvt.f.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; 
CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, 
ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); 
define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 
+374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define 
@intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ 
%0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, 
v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 
%3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e16, m2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -433,13 +442,14 @@ , half, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +458,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -480,13 +490,14 @@ , half, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +506,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -527,13 +538,14 @@ , half, , + i32, i32); define 
@intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -542,7 +554,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -574,13 +586,14 @@ , half, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -589,7 +602,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -621,13 +634,14 @@ , half, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -668,13 +682,14 @@ , float, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -715,13 +730,14 @@ , float, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -762,13 +778,14 @@ , float, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -809,13 +826,14 @@ , float, , + i32, i32); define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -433,13 +442,14 @@ , half, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +458,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -480,13 +490,14 @@ , half, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +506,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -527,13 +538,14 @@ , half, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -542,7 +554,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -574,13 +586,14 @@ , half, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -589,7 +602,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -621,13 +634,14 @@ , half, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -668,13 +682,14 @@ , float, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vfwmul.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -715,13 +730,14 @@ , float, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -762,13 +778,14 @@ , float, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -809,13 +826,14 @@ , float, , + i64, i64); define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v12, 
v14, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -433,13 +442,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +458,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -480,13 +490,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +506,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -527,13 +538,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -542,7 +554,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -574,13 +586,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -589,7 +602,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -621,13 +634,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -668,13 +682,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -715,13 +730,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -762,13 +778,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -809,13 +826,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll @@ 
-27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; 
CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -433,13 +442,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +458,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -480,13 +490,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +506,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -527,13 +538,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -542,7 +554,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -574,13 +586,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -589,7 +602,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -621,13 +634,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -636,7 +650,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -668,13 +682,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +698,7 @@ %1, float %2, %3, - i64 %4) 
+ i64 %4, i64 1) ret %a } @@ -715,13 +730,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -730,7 +746,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -762,13 +778,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +794,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -809,13 +826,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -824,7 +842,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) 
+ i32 %4, i32 1) ret %a } @@ -202,13 +206,14 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -217,7 +222,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -247,12 +252,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,13 +387,14 @@ , , , + i32, i32); define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -394,7 +403,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -425,13 +434,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -440,7 +450,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -471,13 +481,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -517,13 +528,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +544,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -563,13 +575,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -578,7 +591,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -609,13 +622,14 @@ , half, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ %1, half %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -655,13 +669,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -670,7 +685,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -701,13 +716,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,13 +763,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -762,7 +779,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -793,13 +810,14 @@ , float, , + i32, i32); define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -808,7 +826,7 @@ %1, float %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -816,7 +834,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -825,7 +843,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -833,7 +851,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +860,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -850,7 +868,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -859,7 +877,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -867,7 +885,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +894,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -884,7 +902,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -893,7 +911,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -901,7 +919,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -910,7 +928,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -918,7 +936,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -927,7 +945,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -935,7 +953,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +962,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -952,7 +970,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i32 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -961,7 +979,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -970,7 +988,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -979,7 +997,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -988,7 +1006,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1015,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1006,7 +1024,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1015,7 +1033,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1024,7 +1042,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1033,7 +1051,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1042,7 +1060,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1051,7 +1069,7 @@ %0, half %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1060,7 +1078,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1069,7 +1087,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1078,7 +1096,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1087,7 +1105,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1096,7 +1114,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1105,7 +1123,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1114,7 +1132,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1123,7 +1141,7 @@ %0, float %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,13 +206,14 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -217,7 +222,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -247,12 +252,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,13 +387,14 @@ , , , + i64, i64); define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -394,7 +403,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -425,13 +434,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -440,7 +450,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -471,13 +481,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -517,13 +528,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +544,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -563,13 +575,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -578,7 +591,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -609,13 +622,14 @@ , half, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: 
vfwsub.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ %1, half %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -655,13 +669,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -670,7 +685,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -701,13 +716,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,13 +763,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -762,7 +779,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -793,13 +810,14 @@ , float, , + i64, i64); define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -808,7 +826,7 @@ %1, float %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -816,7 +834,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -825,7 +843,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -833,7 +851,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +860,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -850,7 +868,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -859,7 +877,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -867,7 +885,7 @@ define 
@intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +894,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -884,7 +902,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -893,7 +911,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -901,7 +919,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -910,7 +928,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -918,7 +936,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -927,7 +945,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -935,7 +953,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +962,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -952,7 +970,7 @@ define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -961,7 +979,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -970,7 +988,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -979,7 +997,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -988,7 +1006,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1015,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1006,7 +1024,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1015,7 +1033,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1024,7 +1042,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1033,7 +1051,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1042,7 +1060,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1051,7 +1069,7 @@ %0, half %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1060,7 +1078,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1069,7 +1087,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1078,7 +1096,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1087,7 +1105,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1096,7 +1114,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1105,7 +1123,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1114,7 +1132,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t ; CHECK-NEXT: ret entry: @@ -1123,7 +1141,7 @@ %0, float %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll @@ -24,12 +24,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -64,12 +65,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: 
ret entry: @@ -77,7 +79,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -104,12 +106,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -117,7 +120,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -144,12 +147,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -157,7 +161,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -184,12 +188,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -197,7 +202,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -224,12 +229,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -237,7 +243,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -264,12 +270,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -277,7 +284,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -304,12 +311,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -317,7 +325,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -344,12 +352,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -357,7 +366,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -384,12 +393,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -397,7 +407,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -424,12 +434,13 @@ , 
*, , + i32, i32); define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +448,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -464,12 +475,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -477,7 +489,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -504,12 +516,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -517,7 +530,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -544,12 +557,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -557,7 +571,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -584,12 +598,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -597,7 +612,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -624,12 +639,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -637,7 +653,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -664,12 +680,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -677,7 +694,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -704,12 +721,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -717,7 +735,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -744,12 +762,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { 
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -757,7 +776,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -784,12 +803,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +817,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -824,12 +844,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -837,7 +858,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -864,12 +885,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -877,7 +899,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -904,12 +926,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -917,7 +940,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -944,12 +967,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -957,7 +981,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -984,12 +1008,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1022,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1024,12 +1049,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1037,7 +1063,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1064,12 +1090,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1077,7 +1104,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1104,12 +1131,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1117,7 +1145,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1144,12 +1172,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1157,7 +1186,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1184,12 +1213,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1197,7 +1227,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1224,12 +1254,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1237,7 +1268,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1264,12 +1295,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1277,7 +1309,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1304,12 +1336,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1317,7 +1350,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1344,12 +1377,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1357,7 +1391,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1384,12 +1418,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, 
ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1397,7 +1432,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1424,12 +1459,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1437,7 +1473,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1464,12 +1500,13 @@ , *, , + i32, i32); define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1477,7 +1514,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll @@ -24,12 +24,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -64,12 +65,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -77,7 +79,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -104,12 +106,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -117,7 +120,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -144,12 +147,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -157,7 +161,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -184,12 +188,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -197,7 +202,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -224,12 +229,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -237,7 +243,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -264,12 +270,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -277,7 +284,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -304,12 +311,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -317,7 +325,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -344,12 +352,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -357,7 +366,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -384,12 +393,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -397,7 +407,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -424,12 +434,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +448,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -464,12 +475,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -477,7 +489,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -504,12 +516,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -517,7 +530,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -544,12 +557,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -557,7 +571,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -584,12 +598,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -597,7 +612,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -624,12 +639,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -637,7 +653,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -664,12 +680,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -677,7 +694,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -704,12 +721,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -717,7 +735,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -744,12 +762,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -757,7 +776,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -784,12 +803,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +817,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -824,12 +844,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -837,7 +858,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -864,12 +885,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -877,7 +899,7 @@ %0, * %1, %2, - i64 %3) + i64 
%3, i64 1) ret %a } @@ -904,12 +926,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -917,7 +940,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -944,12 +967,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -957,7 +981,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -984,12 +1008,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -997,7 +1022,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1024,12 +1049,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1037,7 +1063,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1064,12 +1090,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1077,7 +1104,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1104,12 +1131,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1117,7 +1145,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1144,12 +1172,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1157,7 +1186,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1184,12 +1213,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1197,7 +1227,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1224,12 +1254,13 @@ , *, , + i64, i64); 
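For reference, the masked unit-stride load intrinsics now carry a trailing policy operand after vl; passing 1 selects tail-agnostic lowering, which is why the expected vsetvli in these checks switches from "tu, mu" to "ta, mu". Below is a minimal sketch of the updated shape for the rv64 case. The concrete element type (<vscale x 1 x i8>), the mangled intrinsic suffix, and the wrapper name @sketch_vle_mask are assumptions reconstructed from the mangled test names in this listing, not taken verbatim from the patch.

; Sketch only: types and the exact mangled intrinsic name are assumed.
declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(
  <vscale x 1 x i8>,    ; maskedoff (merge) operand
  <vscale x 1 x i8>*,   ; base pointer
  <vscale x 1 x i1>,    ; mask
  i64,                  ; vl
  i64)                  ; policy immediate (1 = tail agnostic)

define <vscale x 1 x i8> @sketch_vle_mask(<vscale x 1 x i8> %maskedoff,
                                          <vscale x 1 x i8>* %ptr,
                                          <vscale x 1 x i1> %mask,
                                          i64 %vl) {
entry:
  ; policy = 1 requests tail-agnostic behaviour, so the backend is expected
  ; to emit "vsetvli ..., ta, mu" rather than "..., tu, mu".
  %v = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(
              <vscale x 1 x i8> %maskedoff, <vscale x 1 x i8>* %ptr,
              <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  ret <vscale x 1 x i8> %v
}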
define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1237,7 +1268,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1264,12 +1295,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1277,7 +1309,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1304,12 +1336,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1317,7 +1350,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1344,12 +1377,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1357,7 +1391,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1384,12 +1418,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1397,7 +1432,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1424,12 +1459,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1437,7 +1473,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1464,12 +1500,13 @@ , *, , + i64, i64); define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1477,7 +1514,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll @@ -27,12 +27,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: 
sw a0, 0(a2) @@ -42,7 +43,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -76,12 +77,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -91,7 +93,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -125,12 +127,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -140,7 +143,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -174,12 +177,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -189,7 +193,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -223,12 +227,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -238,7 +243,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -272,12 +277,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -287,7 +293,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -321,12 +327,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -336,7 +343,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -370,12 +377,13 @@ , *, , + i32, i32); define 
@intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -385,7 +393,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -419,12 +427,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -434,7 +443,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -468,12 +477,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -483,7 +493,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -517,12 +527,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -532,7 +543,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -566,12 +577,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -581,7 +593,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -615,12 +627,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -630,7 +643,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -664,12 +677,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -679,7 +693,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -713,12 +727,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -728,7 +743,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -762,12 +777,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -777,7 +793,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -811,12 +827,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -826,7 +843,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -860,12 +877,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -875,7 +893,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -909,12 +927,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -924,7 +943,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -958,12 +977,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -973,7 +993,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = 
extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1007,12 +1027,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1022,7 +1043,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1056,12 +1077,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1071,7 +1093,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1105,12 +1127,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1120,7 +1143,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1154,12 +1177,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1169,7 +1193,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1203,12 +1227,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1218,7 +1243,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1252,12 +1277,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1267,7 +1293,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1301,12 +1327,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * 
%1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1316,7 +1343,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1350,12 +1377,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1365,7 +1393,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1399,12 +1427,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv16half_nxv16f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1414,7 +1443,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1448,12 +1477,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv32half_nxv32f16( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1463,7 +1493,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1497,12 +1527,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1512,7 +1543,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1546,12 +1577,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1561,7 +1593,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1595,12 +1627,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e8, mf2, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1610,7 +1643,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1644,12 +1677,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1659,7 +1693,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1693,12 +1727,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1708,7 +1743,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1742,12 +1777,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1757,7 +1793,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1791,12 +1827,13 @@ , *, , + i32, i32); define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1806,7 +1843,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 %c = extractvalue { , i32 } %a, 1 store i32 %c, i32* %4 @@ -1832,7 +1869,7 @@ define @intrinsic_vleff_mask_dead_vl( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_dead_vl: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1840,7 +1877,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 0 ret %b @@ -1867,7 +1904,7 @@ define void @intrinsic_vleff_mask_dead_value( %0, * %1, %2, i32 %3, i32* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1877,7 +1914,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) %b = extractvalue { , i32 } %a, 1 store i32 %b, i32* %4 @@ -1901,7 +1938,7 @@ define void @intrinsic_vleff_mask_dead_all( %0, * %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_dead_all: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1909,7 +1946,7 @@ %0, * %1, %2, - i32 %3) + i32 %3, i32 1) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll @@ -27,12 +27,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -42,7 +43,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -76,12 +77,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -91,7 +93,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -125,12 +127,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -140,7 +143,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -174,12 +177,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -189,7 +193,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -223,12 +227,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -238,7 +243,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -272,12 +277,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: 
vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -287,7 +293,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -321,12 +327,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -336,7 +343,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -370,12 +377,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -385,7 +393,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -419,12 +427,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -434,7 +443,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -468,12 +477,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -483,7 +493,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -517,12 +527,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -532,7 +543,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -566,12 +577,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -581,7 +593,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, 
i64* %4 @@ -615,12 +627,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -630,7 +643,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -664,12 +677,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -679,7 +693,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -713,12 +727,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -728,7 +743,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -762,12 +777,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -777,7 +793,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -811,12 +827,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -826,7 +843,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -860,12 +877,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -875,7 +893,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -909,12 +927,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -924,7 +943,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -958,12 +977,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -973,7 +993,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1007,12 +1027,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1022,7 +1043,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1056,12 +1077,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1071,7 +1093,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1105,12 +1127,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1120,7 +1143,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1154,12 +1177,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1169,7 +1193,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1203,12 +1227,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1half_nxv1f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 
0(a2) @@ -1218,7 +1243,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1252,12 +1277,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv2half_nxv2f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1267,7 +1293,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1301,12 +1327,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv4half_nxv4f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1316,7 +1343,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1350,12 +1377,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv8half_nxv8f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1365,7 +1393,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1399,12 +1427,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv16half_nxv16f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1414,7 +1443,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1448,12 +1477,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv32half_nxv32f16( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1463,7 +1493,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1497,12 +1527,13 @@ , *, , + i64, i64); define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3, i64* %4) nounwind { ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vle8ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1512,7 +1543,7 @@ %0, * %1, %2, - i64 %3) + i64 %3, i64 1) %b = extractvalue { , i64 } %a, 0 %c = extractvalue { , i64 } %a, 1 store i64 %c, i64* %4 @@ -1546,12 +1577,13 @@ , 
   <vscale x 2 x i8>*,
   <vscale x 2 x i1>,
+  i64,
   i64);
 
 define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
@@ -1561,7 +1593,7 @@
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 1)
   %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
   %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
   store i64 %c, i64* %4
@@ -1595,12 +1627,13 @@
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i1>,
+  i64,
   i64);
 
 define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
@@ -1610,7 +1643,7 @@
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 1)
   %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
   %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
   store i64 %c, i64* %4
[... the nxv8i8, nxv16i8, nxv32i8 and nxv64i8 vleff.mask tests and the intrinsic_vleff_mask_dead_vl, intrinsic_vleff_mask_dead_value and intrinsic_vleff_mask_dead_all tests are updated identically: the declaration gains a trailing i64 policy operand, the expected vsetvli switches from "tu, mu" to "ta, mu", and each call passes an extra "i64 1" ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
@@ -27,12 +27,13 @@
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
+  i32,
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -41,7 +42,7 @@
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 1)
 
   ret <vscale x 1 x i8> %a
 }
[... the remaining vloxei.mask tests in vloxei-rv32.ll, covering every element type (i8, i16, i32, i64, f16, f32, f64) and every index width (i64, i32, i16, i8), receive the same update: a trailing i32 policy operand on the declaration, "ta, mu" in place of "tu, mu" in the expected vsetvli, and an extra "i32 1" on each call, through the final test below ...]
@@ -5975,12 +6108,13 @@
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
+  i32,
   i32);
 
 define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -5989,7 +6123,7 @@
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 1)
 
   ret <vscale x 8 x double> %a
 }
+6123,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -27,12 +27,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ *, , , + i64, i64); define 
@intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -566,12 +578,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -580,7 +593,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -610,12 +623,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -654,12 +668,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: 
vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -668,7 +683,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -698,12 +713,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -712,7 +728,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -743,12 +759,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -757,7 +774,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -788,12 +805,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -802,7 +820,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -833,12 +851,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -847,7 +866,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -878,12 +897,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +912,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -923,12 +943,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -937,7 +958,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -968,12 +989,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -982,7 +1004,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1013,12 +1035,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,12 +1081,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1072,7 +1096,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1102,12 +1126,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1116,7 +1141,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1146,12 +1171,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1160,7 +1186,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1190,12 +1216,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1204,7 +1231,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1234,12 +1261,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1276,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1279,12 +1307,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1293,7 +1322,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1324,12 +1353,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1338,7 +1368,7 @@ * %1, %2, %3, - 
i64 %4) + i64 %4, i64 1) ret %a } @@ -1369,12 +1399,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1383,7 +1414,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1414,12 +1445,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1428,7 +1460,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1459,12 +1491,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1473,7 +1506,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1504,12 +1537,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1518,7 +1552,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1549,12 +1583,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1563,7 +1598,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1594,12 +1629,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1644,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1639,12 +1675,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1653,7 +1690,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1684,12 +1721,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1698,7 +1736,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1728,12 +1766,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1742,7 +1781,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1772,12 +1811,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1826,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1816,12 +1856,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1830,7 +1871,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1860,12 +1901,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1874,7 +1916,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1904,12 +1946,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1918,7 +1961,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1949,12 +1992,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1963,7 +2007,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1994,12 +2038,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2008,7 +2053,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2039,12 +2084,13 @@ *, , 
, + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2053,7 +2099,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2084,12 +2130,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2098,7 +2145,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2129,12 +2176,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2143,7 +2191,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2174,12 +2222,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2188,7 +2237,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2219,12 +2268,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2233,7 +2283,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2264,12 +2314,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2278,7 +2329,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2309,12 +2360,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2323,7 +2375,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2353,12 +2405,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2367,7 +2420,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2397,12 +2450,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2411,7 +2465,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2441,12 +2495,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2455,7 +2510,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2485,12 +2540,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2499,7 +2555,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2529,12 +2585,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2543,7 +2600,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2574,12 +2631,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2588,7 +2646,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2619,12 +2677,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2633,7 +2692,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2664,12 +2723,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2678,7 +2738,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2709,12 +2769,13 @@ *, , , + i64, i64); define 
@intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2723,7 +2784,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2754,12 +2815,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2768,7 +2830,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2799,12 +2861,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2813,7 +2876,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2844,12 +2907,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2858,7 +2922,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2889,12 +2953,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2903,7 +2968,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2934,12 +2999,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2948,7 +3014,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2979,12 +3045,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2993,7 +3060,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3023,12 +3090,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3037,7 +3105,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3067,12 +3135,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3081,7 +3150,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3111,12 +3180,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3125,7 +3195,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3155,12 +3225,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3169,7 +3240,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3199,12 +3270,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3213,7 +3285,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3243,12 +3315,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3257,7 +3330,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3288,12 +3361,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3302,7 +3376,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3333,12 +3407,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3347,7 +3422,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3378,12 +3453,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3392,7 +3468,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3423,12 +3499,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3437,7 +3514,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3468,12 +3545,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3482,7 +3560,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3513,12 +3591,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3527,7 +3606,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3558,12 +3637,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3572,7 +3652,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3603,12 +3683,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3617,7 +3698,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3648,12 +3729,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3662,7 +3744,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3692,12 +3774,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret 
entry: @@ -3706,7 +3789,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3736,12 +3819,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3750,7 +3834,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3780,12 +3864,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3794,7 +3879,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3824,12 +3909,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3838,7 +3924,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3868,12 +3954,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3882,7 +3969,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3912,12 +3999,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3926,7 +4014,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3957,12 +4045,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3971,7 +4060,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4002,12 +4091,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4016,7 +4106,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4047,12 +4137,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4061,7 +4152,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4092,12 +4183,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4106,7 +4198,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4137,12 +4229,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4151,7 +4244,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4182,12 +4275,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4196,7 +4290,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4227,12 +4321,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4241,7 +4336,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4272,12 +4367,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4286,7 +4382,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4317,12 +4413,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4331,7 +4428,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4361,12 +4458,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4375,7 +4473,7 @@ * %1, 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4405,12 +4503,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4419,7 +4518,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4449,12 +4548,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4463,7 +4563,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4493,12 +4593,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4507,7 +4608,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4537,12 +4638,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4551,7 +4653,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4581,12 +4683,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4595,7 +4698,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4625,12 +4728,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4639,7 +4743,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4670,12 +4774,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4684,7 +4789,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4715,12 +4820,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4729,7 +4835,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4760,12 +4866,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4774,7 +4881,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4805,12 +4912,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4819,7 +4927,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4850,12 +4958,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4864,7 +4973,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4895,12 +5004,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4909,7 +5019,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4940,12 +5050,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4954,7 +5065,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4985,12 +5096,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4999,7 +5111,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5030,12 +5142,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5044,7 +5157,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5075,12 +5188,13 @@ *, , , + i64, i64); define 
@intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5089,7 +5203,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5120,12 +5234,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5134,7 +5249,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5165,12 +5280,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5179,7 +5295,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5210,12 +5326,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5224,7 +5341,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5255,12 +5372,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5269,7 +5387,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5300,12 +5418,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5314,7 +5433,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5345,12 +5464,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5359,7 +5479,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5390,12 +5510,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5404,7 +5525,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5435,12 +5556,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5449,7 +5571,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5480,12 +5602,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5494,7 +5617,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5525,12 +5648,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5539,7 +5663,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5570,12 +5694,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5584,7 +5709,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5615,12 +5740,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5629,7 +5755,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5660,12 +5786,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5674,7 +5801,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5705,12 +5832,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5719,7 +5847,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5750,12 +5878,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5764,7 +5893,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5795,12 +5924,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5809,7 +5939,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5840,12 +5970,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5854,7 +5985,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5885,12 +6016,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5899,7 +6031,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5930,12 +6062,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5944,7 +6077,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5975,12 +6108,13 @@ *, , , + i64, i64); define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5989,7 +6123,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv16i16_nxv16i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16: @@ -22,18 +22,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; 
CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv16i16_nxv16i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8: @@ -52,18 +52,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv16i16_nxv16i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32: @@ -82,18 +82,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: @@ -112,18 +112,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: @@ -142,18 
+142,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: @@ -172,18 +172,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: @@ -204,18 +204,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: @@ -236,18 +236,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, 
i8*, , , i32, i32) define @test_vloxseg3_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: @@ -268,18 +268,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: @@ -301,18 +301,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: @@ -334,18 +334,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: @@ -367,18 +367,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} 
@llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: @@ -401,18 +401,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: @@ -435,18 +435,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16: @@ -469,18 +469,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8: @@ -504,18 +504,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, 
%val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: @@ -539,18 +539,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: @@ -574,18 +574,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8: @@ -610,18 +610,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32: @@ -646,18 +646,18 @@ ; CHECK-NEXT: 
vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16: @@ -682,18 +682,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8: @@ -719,18 +719,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32: @@ -756,18 +756,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16: @@ -793,18 +793,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16: @@ -823,18 +823,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8: @@ -853,18 +853,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32: @@ -883,18 +883,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16: @@ -914,18 +914,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8: @@ -946,18 +946,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32: @@ -977,18 +977,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) 
{ ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16: @@ -1010,18 +1010,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8: @@ -1043,18 +1043,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32: @@ -1075,18 +1075,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32: @@ -1105,18 +1105,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret 
%1 } declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8: @@ -1135,18 +1135,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16: @@ -1165,18 +1165,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: @@ -1197,18 +1197,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: @@ -1229,18 +1229,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* 
%base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16: @@ -1261,18 +1261,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: @@ -1294,18 +1294,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: @@ -1327,18 +1327,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: @@ -1360,18 +1360,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu 
; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: @@ -1394,18 +1394,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: @@ -1428,18 +1428,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: @@ -1462,18 +1462,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32, i32) define 
@test_vloxseg6_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: @@ -1497,18 +1497,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: @@ -1532,18 +1532,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: @@ -1567,18 +1567,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: @@ -1603,18 +1603,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: @@ -1639,18 +1639,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: @@ -1675,18 +1675,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: @@ -1712,18 +1712,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2i32_nxv2i8: @@ -1749,18 +1749,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: @@ -1786,18 +1786,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: @@ -1816,18 +1816,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: @@ -1846,18 +1846,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, 
%val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: @@ -1876,18 +1876,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: @@ -1908,18 +1908,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: @@ -1940,18 +1940,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: @@ -1971,18 +1971,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: @@ -2004,18 +2004,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: @@ -2037,18 +2037,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: @@ -2070,18 +2070,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: @@ -2104,18 +2104,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; 
CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: @@ -2138,18 +2138,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: @@ -2172,18 +2172,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: @@ -2207,18 +2207,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: @@ -2242,18 +2242,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: @@ -2277,18 +2277,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: @@ -2313,18 +2313,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: @@ -2349,18 +2349,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: @@ -2385,18 +2385,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: @@ -2422,18 +2422,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: @@ -2459,18 +2459,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , 
i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: @@ -2496,18 +2496,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: @@ -2526,18 +2526,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: @@ -2556,18 +2556,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: @@ -2586,18 +2586,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, 
i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: @@ -2618,18 +2618,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: @@ -2650,18 +2650,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: @@ -2682,18 +2682,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: @@ -2715,18 +2715,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), 
v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: @@ -2748,18 +2748,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: @@ -2781,18 +2781,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: @@ -2815,18 +2815,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i32_nxv1i32: @@ -2849,18 +2849,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: @@ -2883,18 +2883,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: @@ -2918,18 +2918,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: @@ -2953,18 +2953,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) 
%1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: @@ -2988,18 +2988,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: @@ -3024,18 +3024,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: @@ -3060,18 +3060,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: @@ -3096,18 +3096,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: @@ -3133,18 +3133,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: @@ -3170,18 +3170,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: @@ -3207,18 +3207,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) 
%1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: @@ -3237,18 +3237,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: @@ -3267,18 +3267,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: @@ -3297,18 +3297,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: @@ -3329,18 +3329,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: @@ -3361,18 +3361,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: @@ -3392,18 +3392,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: @@ -3425,18 +3425,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: @@ -3458,18 +3458,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, 
v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: @@ -3491,18 +3491,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: @@ -3521,18 +3521,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: @@ -3551,18 +3551,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , 
i32, i32) define @test_vloxseg2_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: @@ -3581,18 +3581,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: @@ -3612,18 +3612,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: @@ -3644,18 +3644,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: @@ -3675,18 +3675,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} 
@llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: @@ -3708,18 +3708,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: @@ -3741,18 +3741,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: @@ -3773,18 +3773,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: @@ -3807,18 +3807,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: @@ -3841,18 +3841,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: @@ -3874,18 +3874,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: @@ -3909,18 +3909,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: @@ -3944,18 +3944,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: 
vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: @@ -3979,18 +3979,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: @@ -4015,18 +4015,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: @@ -4051,18 +4051,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8*, , i32) 
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: @@ -4087,18 +4087,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: @@ -4124,18 +4124,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: @@ -4161,18 +4161,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: @@ -4198,18 +4198,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v 
v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: @@ -4228,18 +4228,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv8i32_nxv8i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: @@ -4258,18 +4258,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv8i32_nxv8i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: @@ -4288,18 +4288,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv4i8_nxv4i16(i8* 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: @@ -4318,18 +4318,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: @@ -4348,18 +4348,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: @@ -4378,18 +4378,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: @@ -4410,18 +4410,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8*, , i32) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: @@ -4442,18 +4442,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: @@ -4473,18 +4473,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: @@ -4506,18 +4506,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: @@ -4539,18 +4539,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, 
i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: @@ -4572,18 +4572,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: @@ -4606,18 +4606,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: @@ -4640,18 +4640,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: @@ -4674,18 +4674,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: @@ -4709,18 +4709,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: @@ -4744,18 +4744,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32: @@ -4779,18 +4779,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i8_nxv4i16: @@ -4815,18 +4815,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: @@ -4851,18 +4851,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: @@ -4887,18 +4887,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: @@ -4924,18 +4924,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( 
%val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: @@ -4961,18 +4961,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: @@ -4998,18 +4998,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8: @@ -5028,18 +5028,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: @@ -5058,18 +5058,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16: @@ -5088,18 +5088,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8: @@ -5120,18 +5120,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32: @@ -5152,18 +5152,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32, i32) define 
@test_vloxseg3_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16: @@ -5184,18 +5184,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: @@ -5217,18 +5217,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32: @@ -5250,18 +5250,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: @@ -5283,18 +5283,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare 
{,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: @@ -5317,18 +5317,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: @@ -5351,18 +5351,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: @@ -5385,18 +5385,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: @@ -5420,18 +5420,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: @@ -5455,18 +5455,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: @@ -5490,18 +5490,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: @@ -5526,18 +5526,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32, i32) define 
@test_vloxseg7_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: @@ -5562,18 +5562,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: @@ -5598,18 +5598,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: @@ -5635,18 +5635,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: @@ -5672,18 +5672,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( 
%val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: @@ -5709,18 +5709,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16: @@ -5739,18 +5739,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8: @@ -5769,18 +5769,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i8_nxv2i32: @@ -5799,18 +5799,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8: @@ -5829,18 +5829,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16: @@ -5859,18 +5859,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32: @@ -5891,18 +5891,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, 
i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8: @@ -5923,18 +5923,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16: @@ -5955,18 +5955,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32: @@ -5988,18 +5988,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8: @@ -6021,18 +6021,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 
ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16: @@ -6054,18 +6054,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32: @@ -6088,18 +6088,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8: @@ -6122,18 +6122,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16: @@ -6156,18 +6156,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( 
%val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32: @@ -6191,18 +6191,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8: @@ -6226,18 +6226,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16: @@ -6261,18 +6261,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32: @@ -6297,18 +6297,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 
; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8: @@ -6333,18 +6333,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16: @@ -6369,18 +6369,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32: @@ -6406,18 +6406,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 
1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8: @@ -6443,18 +6443,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16: @@ -6480,18 +6480,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32: @@ -6510,18 +6510,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8: @@ -6540,18 +6540,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16: @@ -6570,18 +6570,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32: @@ -6602,18 +6602,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8: @@ -6634,18 +6634,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2i16_nxv2i16: @@ -6666,18 +6666,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32: @@ -6699,18 +6699,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8: @@ -6732,18 +6732,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16: @@ -6765,18 +6765,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32: @@ -6799,18 +6799,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8: @@ -6833,18 +6833,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16: @@ -6867,18 +6867,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32: @@ -6902,18 +6902,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8: @@ -6937,18 +6937,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16: @@ -6972,18 +6972,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32: @@ -7008,18 +7008,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv2i16_nxv2i8: @@ -7044,18 +7044,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16: @@ -7080,18 +7080,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32: @@ -7117,18 +7117,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8: @@ -7154,18 +7154,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) 
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16: @@ -7191,18 +7191,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16: @@ -7221,18 +7221,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8: @@ -7251,18 +7251,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32: @@ -7281,18 +7281,18 @@ ; CHECK-LABEL: 
test_vloxseg2_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16: @@ -7313,18 +7313,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8: @@ -7345,18 +7345,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32: @@ -7377,18 +7377,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32) +declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16: @@ -7410,18 +7410,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8: @@ -7443,18 +7443,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32: @@ -7476,18 +7476,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv16f16_nxv16i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: @@ -7506,18 +7506,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv16f16_nxv16i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: @@ -7536,18 +7536,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv16f16_nxv16i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: @@ -7566,18 +7566,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32, i32) define @test_vloxseg2_nxv4f64_nxv4i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: @@ -7596,18 +7596,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i32, i32) define @test_vloxseg2_nxv4f64_nxv4i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: @@ -7626,18 +7626,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i32, i32) define @test_vloxseg2_nxv4f64_nxv4i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: @@ -7656,18 +7656,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i32, i32) define @test_vloxseg2_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: @@ -7686,18 +7686,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i32, i32) define @test_vloxseg2_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: @@ -7716,18 +7716,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , 
i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i32, i32) define @test_vloxseg2_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: @@ -7746,18 +7746,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i32, i32) define @test_vloxseg3_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: @@ -7778,18 +7778,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32, i32) define @test_vloxseg3_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: @@ -7810,18 +7810,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32, i32) define @test_vloxseg3_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: @@ -7842,18 +7842,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8: @@ -7875,18 +7875,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: @@ -7908,18 +7908,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: @@ -7941,18 +7941,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32, i32) define @test_vloxseg5_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: @@ -7975,18 +7975,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32, i32) define @test_vloxseg5_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: @@ -8009,18 +8009,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32, i32) define @test_vloxseg5_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: @@ -8043,18 +8043,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32, i32) define @test_vloxseg6_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: @@ -8078,18 +8078,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32, i32) define @test_vloxseg6_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: @@ -8113,18 +8113,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32, i32) define @test_vloxseg6_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: @@ -8148,18 +8148,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32, i32) define @test_vloxseg7_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: @@ -8184,18 +8184,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32, i32) define @test_vloxseg7_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: @@ -8220,18 +8220,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), 
v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32, i32) define @test_vloxseg7_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: @@ -8256,18 +8256,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32, i32) define @test_vloxseg8_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: @@ -8293,18 +8293,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32, i32) define @test_vloxseg8_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: @@ -8330,18 +8330,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32, i32) define @test_vloxseg8_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: @@ -8367,18 +8367,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: @@ -8397,18 +8397,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: @@ -8427,18 +8427,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: @@ -8457,18 +8457,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, 
(a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32, i32) define @test_vloxseg3_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: @@ -8489,18 +8489,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32, i32) define @test_vloxseg3_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: @@ -8521,18 +8521,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32, i32) define @test_vloxseg3_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: @@ -8553,18 +8553,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: @@ -8586,18 +8586,18 @@ ; 
CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: @@ -8619,18 +8619,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: @@ -8652,18 +8652,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: @@ -8686,18 +8686,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float*, , i32) -declare 
{,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: @@ -8720,18 +8720,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: @@ -8754,18 +8754,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: @@ -8789,18 +8789,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: @@ -8824,18 +8824,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: @@ -8859,18 +8859,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: @@ -8895,18 +8895,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: @@ -8931,18 +8931,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32) +declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: @@ -8967,18 +8967,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: @@ -9004,18 +9004,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: @@ -9041,18 +9041,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: @@ -9078,18 +9078,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), 
v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: @@ -9108,18 +9108,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: @@ -9138,18 +9138,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: @@ -9168,18 +9168,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32, i32) define 
@test_vloxseg3_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: @@ -9200,18 +9200,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: @@ -9232,18 +9232,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: @@ -9264,18 +9264,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: @@ -9297,18 +9297,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} 
@llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: @@ -9330,18 +9330,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: @@ -9363,18 +9363,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: @@ -9397,18 +9397,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: @@ -9431,18 +9431,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: @@ -9465,18 +9465,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: @@ -9500,18 +9500,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: @@ -9535,18 +9535,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { 
; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: @@ -9570,18 +9570,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: @@ -9606,18 +9606,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: @@ -9642,18 +9642,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: @@ -9678,18 +9678,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: @@ -9715,18 +9715,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: @@ -9752,18 +9752,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: @@ -9789,18 +9789,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv1f32_nxv1i8(float* 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: @@ -9819,18 +9819,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: @@ -9849,18 +9849,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: @@ -9879,18 +9879,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32, i32) define @test_vloxseg3_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: @@ -9911,18 +9911,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 
1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32, i32) define @test_vloxseg3_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: @@ -9943,18 +9943,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32, i32) define @test_vloxseg3_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: @@ -9975,18 +9975,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: @@ -10008,18 +10008,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: @@ -10041,18 +10041,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: @@ -10074,18 +10074,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: @@ -10108,18 +10108,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: @@ -10142,18 +10142,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: @@ 
-10176,18 +10176,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: @@ -10211,18 +10211,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: @@ -10246,18 +10246,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: @@ -10281,18 +10281,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* 
%base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: @@ -10317,18 +10317,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: @@ -10353,18 +10353,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: @@ -10389,18 +10389,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: @@ -10426,18 +10426,18 @@ ; CHECK-NEXT: vmv1r.v v15, 
v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: @@ -10463,18 +10463,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: @@ -10500,18 +10500,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: @@ -10530,18 +10530,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = 
tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: @@ -10560,18 +10560,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: @@ -10590,18 +10590,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: @@ -10622,18 +10622,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: @@ -10654,18 +10654,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, 
ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: @@ -10685,18 +10685,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: @@ -10718,18 +10718,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: @@ -10751,18 +10751,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32, i32) define 
@test_vloxseg4_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: @@ -10784,18 +10784,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv8f32_nxv8i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: @@ -10814,18 +10814,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv8f32_nxv8i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: @@ -10844,18 +10844,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv8f32_nxv8i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: @@ -10874,18 +10874,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32, i32) define @test_vloxseg2_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: @@ -10904,18 +10904,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32, i32) define @test_vloxseg2_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: @@ -10934,18 +10934,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32, i32) define @test_vloxseg2_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: @@ -10964,18 +10964,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32, i32) define @test_vloxseg3_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: @@ -10996,18 +10996,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli 
zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32, i32) define @test_vloxseg3_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: @@ -11028,18 +11028,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32, i32) define @test_vloxseg3_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: @@ -11060,18 +11060,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: @@ -11093,18 +11093,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32, 
i32) define @test_vloxseg4_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: @@ -11126,18 +11126,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: @@ -11159,18 +11159,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: @@ -11189,18 +11189,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: @@ -11219,18 +11219,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, 
half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: @@ -11249,18 +11249,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: @@ -11281,18 +11281,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: @@ -11313,18 +11313,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: @@ -11344,18 +11344,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: @@ -11377,18 +11377,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: @@ -11410,18 +11410,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: @@ -11443,18 +11443,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: @@ -11477,18 +11477,18 @@ ; 
CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: @@ -11511,18 +11511,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: @@ -11545,18 +11545,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: @@ -11580,18 +11580,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: @@ -11615,18 +11615,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: @@ -11650,18 +11650,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: @@ -11686,18 +11686,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: @@ -11722,18 +11722,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: @@ -11758,18 +11758,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: @@ -11795,18 +11795,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: @@ -11832,18 +11832,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: @@ -11869,18 +11869,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: @@ -11899,18 +11899,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: @@ -11929,18 +11929,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: @@ -11959,18 +11959,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, 
v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32: @@ -11991,18 +11991,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: @@ -12023,18 +12023,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: @@ -12055,18 +12055,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: @@ -12088,18 +12088,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 
; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: @@ -12121,18 +12121,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: @@ -12154,18 +12154,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: @@ -12188,18 +12188,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: @@ -12222,18 +12222,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: @@ -12256,18 +12256,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: @@ -12291,18 +12291,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: @@ -12326,18 +12326,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: @@ -12361,18 +12361,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: @@ -12397,18 +12397,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: @@ -12433,18 +12433,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , 
, i32, i32) define @test_vloxseg7_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: @@ -12469,18 +12469,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: @@ -12506,18 +12506,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: @@ -12543,18 +12543,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: @@ -12580,18 +12580,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: 
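; Editorial sketch (not part of the patch): in this rendering the scalable-vector
; types appear to have been stripped, so calls read "( %val, %val, ... )".
; Assuming the nxv* suffixes in the intrinsic names are authoritative, one of
; these masked segment loads would be declared and used roughly as below. The
; extra trailing i32/i64 operand added throughout these tests (after %vl) looks
; to be the tail-policy control: the tests pass 1, and the expected vsetvli in
; the CHECK lines flips from "tu" (tail undisturbed) to "ta" (tail agnostic).
; The wrapper function name below is hypothetical, not part of the test file.
declare {<vscale x 2 x half>, <vscale x 2 x half>}
  @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(
    <vscale x 2 x half>, <vscale x 2 x half>, half*,
    <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)

define <vscale x 2 x half> @sketch_vloxseg2_mask(<vscale x 2 x half> %val,
                                                 half* %base,
                                                 <vscale x 2 x i16> %index,
                                                 <vscale x 2 x i1> %mask,
                                                 i32 %vl) {
entry:
  ; Both passthru operands reuse %val, mirroring the tests above; the final
  ; "i32 1" is the new policy immediate (1 = tail agnostic in these tests).
  %0 = tail call {<vscale x 2 x half>, <vscale x 2 x half>}
    @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(
      <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base,
      <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 2 x half>, <vscale x 2 x half>} %0, 1
  ret <vscale x 2 x half> %1
}
; With the policy passed as tail agnostic, the backend no longer has to keep
; the destination's tail elements, which matches the tu -> ta change in the
; updated CHECK lines (the rv64 file uses i64 for both %vl and the policy).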
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: @@ -12610,18 +12610,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: @@ -12640,18 +12640,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: @@ -12670,18 +12670,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32, i32) define @test_vloxseg3_nxv4f32_nxv4i16(float* %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: @@ -12702,18 +12702,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32, i32) define @test_vloxseg3_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: @@ -12734,18 +12734,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32, i32) define @test_vloxseg3_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: @@ -12766,18 +12766,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: @@ -12799,18 +12799,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} 
@llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: @@ -12832,18 +12832,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: @@ -12865,12 +12865,12 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16: @@ -22,18 +22,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8: @@ -52,18 +52,18 @@ ; CHECK-LABEL: 
test_vloxseg2_mask_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32: @@ -82,18 +82,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32: @@ -112,18 +112,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8: @@ -142,18 +142,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32*, , i64) -declare 
{,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i64: @@ -172,18 +172,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16: @@ -202,18 +202,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32: @@ -234,18 +234,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8: @@ -266,18 +266,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i64: @@ -297,18 +297,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16: @@ -329,18 +329,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32: @@ -362,18 +362,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8: @@ -395,18 +395,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: 
vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i64: @@ -428,18 +428,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16: @@ -461,18 +461,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16: @@ -491,18 +491,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv16i8_nxv16i8: @@ -521,18 +521,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32: @@ -551,18 +551,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16: @@ -582,18 +582,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8: @@ -614,18 +614,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8*, 
, i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32: @@ -645,18 +645,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16: @@ -678,18 +678,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8: @@ -711,18 +711,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32: @@ -743,18 +743,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i64: @@ -773,18 +773,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i32: @@ -803,18 +803,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i16: @@ -833,18 +833,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i8: @@ -863,18 +863,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i64: @@ -895,18 +895,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i32: @@ -927,18 +927,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i16: @@ -959,18 +959,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i8(i64* 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i8: @@ -991,18 +991,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i64: @@ -1024,18 +1024,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i32: @@ -1057,18 +1057,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i16: @@ -1090,18 +1090,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} 
@llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i8: @@ -1123,18 +1123,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i64: @@ -1157,18 +1157,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i32: @@ -1191,18 +1191,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i16: @@ -1225,18 +1225,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i8: @@ -1259,18 +1259,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i64: @@ -1294,18 +1294,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i32: @@ -1329,18 +1329,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv1i64_nxv1i16: @@ -1364,18 +1364,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i8: @@ -1399,18 +1399,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i64: @@ -1435,18 +1435,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i32: @@ -1471,18 +1471,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, 
%val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i16: @@ -1507,18 +1507,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i8: @@ -1543,18 +1543,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i64: @@ -1580,18 +1580,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i32: @@ -1617,18 +1617,18 @@ ; CHECK-NEXT: vmv1r.v 
v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i16: @@ -1654,18 +1654,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i8: @@ -1691,18 +1691,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i64: @@ -1721,18 +1721,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: @@ -1751,18 +1751,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: @@ -1781,18 +1781,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: @@ -1811,18 +1811,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i64: @@ -1843,18 +1843,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: @@ -1875,18 +1875,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: @@ -1907,18 +1907,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: @@ -1939,18 +1939,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i64: @@ -1972,18 +1972,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: 
vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: @@ -2005,18 +2005,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: @@ -2038,18 +2038,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: @@ -2071,18 +2071,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64) +declare {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i64: @@ -2105,18 +2105,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i32: @@ -2139,18 +2139,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: @@ -2173,18 +2173,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: @@ -2207,18 +2207,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i64: @@ -2242,18 +2242,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: @@ -2277,18 +2277,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: @@ -2312,18 +2312,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: @@ -2347,18 +2347,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; 
CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i64: @@ -2383,18 +2383,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: @@ -2419,18 +2419,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: @@ -2455,18 +2455,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = 
extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: @@ -2491,18 +2491,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i64: @@ -2528,18 +2528,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: @@ -2565,18 +2565,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: @@ -2602,18 +2602,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, 
v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: @@ -2639,18 +2639,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: @@ -2669,18 +2669,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: @@ -2699,18 +2699,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} 
@llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i64: @@ -2729,18 +2729,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: @@ -2759,18 +2759,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: @@ -2791,18 +2791,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: @@ -2823,18 +2823,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, 
%val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i64: @@ -2854,18 +2854,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: @@ -2885,18 +2885,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: @@ -2918,18 +2918,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: @@ -2951,18 +2951,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i64: @@ -2983,18 +2983,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: @@ -3016,18 +3016,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: @@ -3046,18 +3046,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64) +declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: @@ -3076,18 +3076,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i64: @@ -3106,18 +3106,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: @@ -3136,18 +3136,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: @@ -3167,18 +3167,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, 
i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: @@ -3199,18 +3199,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i64: @@ -3230,18 +3230,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: @@ -3262,18 +3262,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: @@ -3295,18 +3295,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* 
%base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: @@ -3328,18 +3328,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i64: @@ -3360,18 +3360,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: @@ -3393,18 +3393,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: @@ -3427,18 +3427,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: @@ -3461,18 +3461,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i64: @@ -3494,18 +3494,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: @@ -3528,18 +3528,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64, i64) 
define @test_vloxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32: @@ -3563,18 +3563,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: @@ -3598,18 +3598,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i64: @@ -3633,18 +3633,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: @@ -3668,18 +3668,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, 
%val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: @@ -3704,18 +3704,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: @@ -3740,18 +3740,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i64: @@ -3776,18 +3776,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16: @@ -3812,18 +3812,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: @@ -3849,18 +3849,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: @@ -3886,18 +3886,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i64: @@ -3923,18 +3923,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) 
%1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: @@ -3960,18 +3960,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i64: @@ -3990,18 +3990,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: @@ -4020,18 +4020,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16: @@ -4050,18 +4050,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8: @@ -4080,18 +4080,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i64: @@ -4112,18 +4112,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32: @@ -4144,18 +4144,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16: @@ -4176,18 +4176,18 @@ ; 
CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8: @@ -4208,18 +4208,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i64: @@ -4241,18 +4241,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32: @@ -4274,18 +4274,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64) +declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: @@ -4307,18 +4307,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: @@ -4340,18 +4340,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i64: @@ -4374,18 +4374,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: @@ -4408,18 +4408,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: @@ -4442,18 +4442,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: @@ -4476,18 +4476,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i64: @@ -4511,18 +4511,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: @@ -4546,18 +4546,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: @@ -4581,18 +4581,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: @@ -4616,18 +4616,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i64: @@ -4652,18 +4652,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16*, 
, i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: @@ -4688,18 +4688,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: @@ -4724,18 +4724,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: @@ -4760,18 +4760,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i64: @@ -4797,18 +4797,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: @@ -4834,18 +4834,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: @@ -4871,18 +4871,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: @@ -4908,18 +4908,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } 
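For reference, a minimal sketch of the recurring change in the masked vloxseg intrinsics above, with the scalable-vector types written out as inferred from the intrinsic-name suffix (nxv1i16 reads as <vscale x 1 x i16>, with a <vscale x 1 x i1> mask); the function name @sketch_vloxseg2_mask_policy is illustrative only, and the trailing i64 operand is the added policy argument, passed here as 1, which matches the tu -> ta switch in the CHECK'd vsetvli lines:

declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @sketch_vloxseg2_mask_policy(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
entry:
  ; final operand (i64 1) is the new policy immediate selecting tail-agnostic behaviour
  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
  ret <vscale x 1 x i16> %1
}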
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i64)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vloxseg2_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32:
@@ -4938,18 +4938,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i64)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vloxseg2_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8:
@@ -4968,18 +4968,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i64)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vloxseg2_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16:
@@ -4998,18 +4998,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32*, <vscale x 2 x i64>, i64)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vloxseg2_nxv2i32_nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i64:
@@ -5028,18 +5028,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call
{,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: @@ -5060,18 +5060,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: @@ -5092,18 +5092,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16: @@ -5124,18 +5124,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i64: @@ -5155,18 +5155,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: @@ -5188,18 +5188,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: @@ -5221,18 +5221,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: @@ -5254,18 +5254,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i64: @@ -5287,18 +5287,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: @@ -5321,18 +5321,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: @@ -5355,18 +5355,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: @@ -5389,18 +5389,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare 
{,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i64: @@ -5423,18 +5423,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: @@ -5458,18 +5458,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: @@ -5493,18 +5493,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: @@ -5528,18 +5528,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v 
v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i64: @@ -5563,18 +5563,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: @@ -5599,18 +5599,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: @@ -5635,18 +5635,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64) +declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: @@ -5671,18 +5671,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i64: @@ -5707,18 +5707,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: @@ -5744,18 +5744,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8: @@ -5781,18 +5781,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: @@ -5818,18 +5818,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i64: @@ -5855,18 +5855,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: @@ -5885,18 +5885,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , 
, i64, i64) define @test_vloxseg2_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: @@ -5915,18 +5915,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i64: @@ -5945,18 +5945,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: @@ -5975,18 +5975,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: @@ -6006,18 +6006,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } 
declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: @@ -6038,18 +6038,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i64: @@ -6069,18 +6069,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: @@ -6100,18 +6100,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: @@ -6133,18 +6133,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: @@ -6166,18 +6166,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i64: @@ -6198,18 +6198,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: @@ -6230,18 +6230,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: @@ -6264,18 +6264,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, 
ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: @@ -6298,18 +6298,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i64: @@ -6331,18 +6331,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: @@ -6364,18 +6364,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, 
i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: @@ -6399,18 +6399,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: @@ -6434,18 +6434,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i64: @@ -6468,18 +6468,18 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: @@ -6503,18 +6503,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: @@ -6539,18 +6539,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: @@ -6575,18 +6575,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i64: @@ -6610,18 +6610,18 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: @@ -6646,18 +6646,18 @@ 
; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: @@ -6683,18 +6683,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: @@ -6720,18 +6720,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i64: @@ -6756,18 +6756,18 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: @@ -6793,18 +6793,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i32: @@ -6823,18 +6823,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i8: @@ -6853,18 +6853,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i64: @@ -6883,18 +6883,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i16: @@ -6913,18 +6913,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: @@ -6943,18 +6943,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: @@ -6973,18 +6973,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64) +declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i64: @@ -7003,18 +7003,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: @@ -7033,18 +7033,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: @@ -7064,18 +7064,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: @@ -7096,18 +7096,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, 
i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i64: @@ -7127,18 +7127,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: @@ -7159,18 +7159,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: @@ -7192,18 +7192,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: @@ -7225,18 +7225,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: 
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i64: @@ -7257,18 +7257,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: @@ -7290,18 +7290,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: @@ -7324,18 +7324,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: @@ -7358,18 +7358,18 @@ 
; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i64: @@ -7391,18 +7391,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: @@ -7425,18 +7425,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: @@ -7460,18 +7460,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret 
%1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: @@ -7495,18 +7495,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i64: @@ -7530,18 +7530,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: @@ -7565,18 +7565,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: @@ -7601,18 +7601,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, 
(a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: @@ -7637,18 +7637,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i64: @@ -7673,18 +7673,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: @@ -7709,18 +7709,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: @@ -7746,18 +7746,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: @@ -7783,18 +7783,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i64: @@ -7820,18 +7820,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: @@ -7857,18 +7857,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i64: @@ -7887,18 +7887,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: @@ -7917,18 +7917,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: @@ -7947,18 +7947,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64, i64) define 
@test_vloxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: @@ -7977,18 +7977,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i64: @@ -8009,18 +8009,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: @@ -8041,18 +8041,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: @@ -8073,18 +8073,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: @@ -8105,18 +8105,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i64: @@ -8138,18 +8138,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: @@ -8171,18 +8171,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: @@ -8204,18 +8204,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, 
i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: @@ -8237,18 +8237,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i64: @@ -8271,18 +8271,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: @@ -8305,18 +8305,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16: @@ -8339,18 +8339,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: @@ -8373,18 +8373,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i64: @@ -8408,18 +8408,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: @@ -8443,18 +8443,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: @@ 
-8478,18 +8478,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8: @@ -8513,18 +8513,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i64: @@ -8549,18 +8549,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32: @@ -8585,18 +8585,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = 
extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16: @@ -8621,18 +8621,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8: @@ -8657,18 +8657,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i64: @@ -8694,18 +8694,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32: @@ -8731,18 +8731,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16: @@ -8768,18 +8768,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8: @@ -8805,18 +8805,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32: @@ -8835,18 +8835,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8: @@ -8865,18 +8865,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16: @@ -8895,18 +8895,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i64: @@ -8925,18 +8925,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32: @@ -8957,18 +8957,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( 
%val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8: @@ -8989,18 +8989,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16: @@ -9021,18 +9021,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i64: @@ -9052,18 +9052,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32: @@ -9085,18 +9085,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8: @@ -9118,18 +9118,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16: @@ -9151,18 +9151,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i64: @@ -9184,18 +9184,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32: @@ -9218,18 +9218,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8: @@ -9252,18 +9252,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16: @@ -9286,18 +9286,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i64: @@ -9320,18 +9320,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64, i64) 
define @test_vloxseg6_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32: @@ -9355,18 +9355,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8: @@ -9390,18 +9390,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16: @@ -9425,18 +9425,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i64: @@ -9460,18 +9460,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %val, %val, 
%val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32: @@ -9496,18 +9496,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8: @@ -9532,18 +9532,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16: @@ -9568,18 +9568,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i64: @@ -9604,18 +9604,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32: @@ -9641,18 +9641,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8: @@ -9678,18 +9678,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16: @@ -9715,18 +9715,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = 
extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i64: @@ -9752,18 +9752,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: @@ -9782,18 +9782,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: @@ -9812,18 +9812,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i64: @@ -9842,18 +9842,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: 
vloxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: @@ -9872,18 +9872,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16: @@ -9902,18 +9902,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8: @@ -9932,18 +9932,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32:
@@ -9962,18 +9962,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
  ret <vscale x 2 x i16> %1
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i64)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vloxseg2_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8:
@@ -9992,18 +9992,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
  ret <vscale x 2 x i16> %1
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i64)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vloxseg2_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16:
@@ -10022,18 +10022,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
  ret <vscale x 2 x i16> %1
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16*, <vscale x 2 x i64>, i64)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vloxseg2_nxv2i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i64:
@@ -10052,18 +10052,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
  ret <vscale x 2 x i16> %1
}

declare {,,}
@llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32: @@ -10084,18 +10084,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8: @@ -10116,18 +10116,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16: @@ -10148,18 +10148,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i64: @@ -10179,18 +10179,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, 
%mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32: @@ -10212,18 +10212,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8: @@ -10245,18 +10245,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16: @@ -10278,18 +10278,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i64: @@ -10311,18 +10311,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32: @@ -10345,18 +10345,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8: @@ -10379,18 +10379,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16: @@ -10413,18 +10413,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64, i64) 
define @test_vloxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i64: @@ -10447,18 +10447,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32: @@ -10482,18 +10482,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8: @@ -10517,18 +10517,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16: @@ -10552,18 +10552,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i64: @@ -10587,18 +10587,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32: @@ -10623,18 +10623,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8: @@ -10659,18 +10659,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16: @@ -10695,18 +10695,18 @@ ; 
CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i64: @@ -10731,18 +10731,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32: @@ -10768,18 +10768,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8: @@ -10805,18 +10805,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16: @@ -10842,18 +10842,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i64: @@ -10879,18 +10879,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64, i64) define @test_vloxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i32: @@ -10909,18 +10909,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64, i64) define @test_vloxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i8: @@ -10939,18 
+10939,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
  ret <vscale x 2 x i64> %1
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64*, <vscale x 2 x i16>, i64)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @test_vloxseg2_nxv2i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i16:
@@ -10969,18 +10969,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
  ret <vscale x 2 x i64> %1
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64*, <vscale x 2 x i64>, i64)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @test_vloxseg2_nxv2i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i64:
@@ -10999,18 +10999,18 @@
; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
  ret <vscale x 2 x i64> %1
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64*, <vscale x 2 x i32>, i64)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @test_vloxseg3_nxv2i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i32:
@@ -11031,18 +11031,18 @@
; CHECK-NEXT: vmv2r.v v2, v8
; CHECK-NEXT: vmv2r.v v4, v2
; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v4
; CHECK-NEXT: ret
entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
  ret <vscale x 2 x i64> %1
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64*, <vscale x 2 x i8>, i64)
-declare {,,}
@llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i8: @@ -11063,18 +11063,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i16: @@ -11095,18 +11095,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i64: @@ -11127,18 +11127,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i32: @@ -11160,18 +11160,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i8: @@ -11193,18 +11193,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i16: @@ -11226,18 +11226,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i64: @@ -11259,18 +11259,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: @@ -11289,18 +11289,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: @@ -11319,18 +11319,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: @@ -11349,18 +11349,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: @@ -11379,18 +11379,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64) +declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: @@ -11409,18 +11409,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i64: @@ -11439,18 +11439,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: @@ -11469,18 +11469,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i64: @@ -11499,18 +11499,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64( %val, 
%val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: @@ -11529,18 +11529,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: @@ -11559,18 +11559,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: @@ -11589,18 +11589,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i64: @@ -11621,18 +11621,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: @@ -11653,18 +11653,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: @@ -11685,18 +11685,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: @@ -11717,18 +11717,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64) +declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i64: @@ -11750,18 +11750,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: @@ -11783,18 +11783,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: @@ -11816,18 +11816,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8: @@ -11849,18 +11849,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i64: @@ -11883,18 +11883,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: @@ -11917,18 +11917,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: @@ -11951,18 +11951,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: @@ -11985,18 +11985,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; 
CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i64: @@ -12020,18 +12020,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: @@ -12055,18 +12055,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: @@ -12090,18 +12090,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 
ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: @@ -12125,18 +12125,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i64: @@ -12161,18 +12161,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: @@ -12197,18 +12197,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: @@ -12233,18 +12233,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: @@ -12269,18 +12269,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i64: @@ -12306,18 +12306,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: @@ -12343,18 +12343,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, 
%val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: @@ -12380,18 +12380,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: @@ -12417,18 +12417,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: @@ -12447,18 +12447,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: @@ -12477,18 +12477,18 @@ ; CHECK-LABEL: 
test_vloxseg2_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: @@ -12507,18 +12507,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i64: @@ -12537,18 +12537,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: @@ -12569,18 +12569,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: @@ -12601,18 +12601,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: @@ -12633,18 +12633,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i64: @@ -12664,18 +12664,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: @@ -12697,18 +12697,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + 
%0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: @@ -12730,18 +12730,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: @@ -12763,18 +12763,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i64: @@ -12796,18 +12796,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: @@ -12830,18 +12830,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: @@ -12864,18 +12864,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: @@ -12898,18 +12898,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i64: @@ -12932,18 +12932,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , 
, i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: @@ -12967,18 +12967,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: @@ -13002,18 +13002,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: @@ -13037,18 +13037,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i64: @@ -13072,18 +13072,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: @@ -13108,18 +13108,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: @@ -13144,18 +13144,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: @@ -13180,18 +13180,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64) +declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i64: @@ -13216,18 +13216,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: @@ -13253,18 +13253,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: @@ -13290,18 +13290,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: @@ -13327,18 +13327,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v 
v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i64: @@ -13364,18 +13364,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i64: @@ -13394,18 +13394,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: @@ -13424,18 +13424,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64) 
+declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: @@ -13454,18 +13454,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: @@ -13484,18 +13484,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i64: @@ -13516,18 +13516,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: @@ -13548,18 +13548,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( 
%val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: @@ -13580,18 +13580,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: @@ -13612,18 +13612,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i64: @@ -13645,18 +13645,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: @@ -13678,18 +13678,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 
; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: @@ -13711,18 +13711,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: @@ -13744,18 +13744,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i64: @@ -13778,18 +13778,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: 
@@ -13812,18 +13812,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: @@ -13846,18 +13846,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: @@ -13880,18 +13880,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i64: @@ -13915,18 +13915,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue 
{,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: @@ -13950,18 +13950,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: @@ -13985,18 +13985,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: @@ -14020,18 +14020,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i64: @@ -14056,18 +14056,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: @@ -14092,18 +14092,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: @@ -14128,18 +14128,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: @@ -14164,18 +14164,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i64: @@ -14201,18 +14201,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: @@ -14238,18 +14238,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: @@ -14275,18 +14275,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: @@ -14312,18 +14312,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v 
v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i64: @@ -14342,18 +14342,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: @@ -14372,18 +14372,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: @@ -14402,18 +14402,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} 
@llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: @@ -14432,18 +14432,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i64: @@ -14464,18 +14464,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: @@ -14496,18 +14496,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: @@ -14528,18 +14528,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( 
%val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: @@ -14560,18 +14560,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i64: @@ -14593,18 +14593,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: @@ -14626,18 +14626,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: @@ -14659,18 +14659,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: @@ -14692,18 +14692,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i64: @@ -14726,18 +14726,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: @@ -14760,18 +14760,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, 
float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: @@ -14794,18 +14794,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: @@ -14828,18 +14828,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i64: @@ -14863,18 +14863,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: @@ -14898,18 +14898,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: @@ -14933,18 +14933,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: @@ -14968,18 +14968,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i64: @@ -15004,18 +15004,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, 
float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: @@ -15040,18 +15040,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: @@ -15076,18 +15076,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: @@ -15112,18 +15112,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i64: @@ -15149,18 +15149,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: @@ -15186,18 +15186,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: @@ -15223,18 +15223,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: @@ -15260,18 +15260,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half*, , i64) -declare 
{,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: @@ -15290,18 +15290,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: @@ -15320,18 +15320,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i64: @@ -15350,18 +15350,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: @@ -15380,18 +15380,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: @@ -15412,18 +15412,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: @@ -15444,18 +15444,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i64: @@ -15475,18 +15475,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: @@ -15506,18 +15506,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: @@ -15539,18 +15539,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: @@ -15572,18 +15572,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i64: @@ -15604,18 +15604,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half*, , i64) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: @@ -15637,18 +15637,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: @@ -15667,18 +15667,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: @@ -15697,18 +15697,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i64: @@ -15727,18 +15727,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: @@ -15757,18 +15757,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: @@ -15787,18 +15787,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: @@ -15817,18 +15817,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: @@ -15847,18 +15847,18 @@ ; 
CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i64: @@ -15877,18 +15877,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: @@ -15909,18 +15909,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: @@ -15941,18 +15941,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double*, , i64) 
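As an illustrative sketch of the intrinsic shape these tests exercise, the masked vloxseg3 declaration for the nxv2f64/nxv2i16 case is reconstructed below from the intrinsic name mangling (nxv2f64 is <vscale x 2 x double>, nxv2i16 is <vscale x 2 x i16>); the scalable vector types are elided in the excerpt above, so the exact spelling is an assumption rather than a verbatim excerpt of the patch. The change appends a trailing i64 policy operand after the vl operand, and the value 1 passed by the updated tests corresponds to the tail-agnostic ("ta") setting seen in the rewritten vsetvli checks.

; Masked segment-load declaration with the new trailing policy operand
; (three passthru values, base pointer, index vector, mask, vl, policy).
declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>}
  @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>,
    double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

; Call site as updated in these tests: the old form ended at `i64 %vl`,
; the new form appends `i64 1` (tail agnostic).
%0 = tail call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>}
  @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val,
    double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)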
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: @@ -15973,18 +15973,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i64: @@ -16005,18 +16005,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: @@ -16038,18 +16038,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: @@ -16071,18 +16071,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, 
%val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: @@ -16104,18 +16104,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i64: @@ -16137,18 +16137,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: @@ -16167,18 +16167,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: @@ -16197,18 +16197,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i64: @@ -16227,18 +16227,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: @@ -16257,18 +16257,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: @@ -16288,18 +16288,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64) 
+declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: @@ -16320,18 +16320,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i64: @@ -16351,18 +16351,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: @@ -16383,18 +16383,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: @@ -16416,18 +16416,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, 
%val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: @@ -16449,18 +16449,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i64: @@ -16481,18 +16481,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: @@ -16514,18 +16514,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: @@ -16548,18 +16548,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: @@ -16582,18 +16582,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i64: @@ -16615,18 +16615,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: @@ -16649,18 +16649,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64) +declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: @@ -16684,18 +16684,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: @@ -16719,18 +16719,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i64: @@ -16754,18 +16754,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: @@ -16789,18 +16789,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, 
%val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: @@ -16825,18 +16825,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: @@ -16861,18 +16861,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i64: @@ -16897,18 +16897,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64, i64) define 
@test_vloxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: @@ -16933,18 +16933,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: @@ -16970,18 +16970,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: @@ -17007,18 +17007,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i64: @@ -17044,18 +17044,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: @@ -17081,18 +17081,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: @@ -17111,18 +17111,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: @@ -17141,18 +17141,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64, i64) define 
@test_vloxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: @@ -17171,18 +17171,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i64: @@ -17201,18 +17201,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32: @@ -17233,18 +17233,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: @@ -17265,18 +17265,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} 
%0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: @@ -17297,18 +17297,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i64: @@ -17328,18 +17328,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: @@ -17361,18 +17361,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: @@ -17394,18 +17394,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: @@ -17427,18 +17427,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i64: @@ -17460,18 +17460,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: @@ -17494,18 +17494,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: @@ -17528,18 +17528,18 @@ ; CHECK-NEXT: 
vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: @@ -17562,18 +17562,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i64: @@ -17596,18 +17596,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: @@ -17631,18 +17631,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: @@ -17666,18 +17666,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: @@ -17701,18 +17701,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i64: @@ -17736,18 +17736,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: @@ -17772,18 +17772,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: @@ -17808,18 +17808,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: @@ -17844,18 +17844,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i64: @@ -17880,18 +17880,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half*, , i64) -declare 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: @@ -17917,18 +17917,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: @@ -17954,18 +17954,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: @@ -17991,18 +17991,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i64: @@ -18028,18 +18028,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: @@ -18058,18 +18058,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: @@ -18088,18 +18088,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i64: @@ -18118,18 +18118,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: @@ -18148,18 +18148,18 @@ ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: @@ -18180,18 +18180,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: @@ -18212,18 +18212,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i64: @@ -18243,18 +18243,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, 
float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64) +declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: @@ -18275,18 +18275,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: @@ -18308,18 +18308,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: @@ -18341,18 +18341,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i64: @@ -18374,18 +18374,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: @@ -18407,12 +18407,12 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll @@ -26,12 +26,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ *, i32, , + i32, i32); define 
@intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,12 +296,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -304,7 +311,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -334,12 +341,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -378,12 +386,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -392,7 +401,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -422,12 +431,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +446,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -466,12 +476,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -480,7 +491,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -510,12 +521,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -524,7 +536,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 
%4, i32 1) ret %a } @@ -554,12 +566,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +581,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -598,12 +611,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -612,7 +626,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -642,12 +656,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +671,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -686,12 +701,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +716,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -730,12 +746,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -744,7 +761,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -774,12 +791,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -788,7 +806,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -818,12 +836,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -832,7 +851,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -862,12 +881,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, 
v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +896,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -906,12 +926,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -920,7 +941,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -950,12 +971,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -964,7 +986,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -994,12 +1016,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1008,7 +1031,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1038,12 +1061,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1052,7 +1076,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1082,12 +1106,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1096,7 +1121,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1126,12 +1151,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1140,7 +1166,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1170,12 +1196,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1184,7 +1211,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1214,12 +1241,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, 
e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1228,7 +1256,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1258,12 +1286,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1272,7 +1301,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1302,12 +1331,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1346,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1346,12 +1376,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1360,7 +1391,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1390,12 +1421,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1436,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1434,12 +1466,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1448,7 +1481,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1478,12 +1511,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1492,7 +1526,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1522,12 +1556,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1536,7 +1571,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1566,12 +1601,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1580,7 +1616,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1610,12 +1646,13 @@ *, i32, , + i32, i32); define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1624,7 +1661,7 @@ * %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll @@ -26,12 +26,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, 
v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,12 +296,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -304,7 +311,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -334,12 +341,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -378,12 +386,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -392,7 +401,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -422,12 +431,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +446,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -466,12 +476,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -480,7 +491,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -510,12 +521,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -524,7 +536,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -554,12 +566,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +581,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -598,12 +611,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -612,7 +626,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -642,12 +656,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +671,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -686,12 +701,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +716,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -730,12 +746,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -744,7 +761,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -774,12 +791,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -788,7 +806,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -818,12 +836,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -832,7 +851,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -862,12 +881,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +896,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -906,12 +926,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -920,7 +941,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -950,12 +971,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -964,7 +986,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -994,12 +1016,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1008,7 +1031,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1038,12 +1061,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1052,7 +1076,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1082,12 +1106,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1096,7 +1121,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1126,12 +1151,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1140,7 +1166,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1170,12 +1196,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1184,7 +1211,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1214,12 +1241,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1228,7 +1256,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1258,12 +1286,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1272,7 +1301,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1302,12 +1331,13 @@ *, i64, , + i64, i64); define 
@intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1346,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1346,12 +1376,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1360,7 +1391,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1390,12 +1421,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1436,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1434,12 +1466,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1448,7 +1481,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1478,12 +1511,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1492,7 +1526,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1522,12 +1556,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1536,7 +1571,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1566,12 +1601,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1580,7 +1616,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1610,12 +1646,13 @@ *, i64, , + i64, i64); define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1624,7 +1661,7 @@ * %1, i64 %2, %3, - i64 %4) + i64 
%4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i32, i32) define @test_vlseg2_nxv16i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16i16: @@ -24,20 +24,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1i8(i8* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i32, i32) define @test_vlseg2_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i8: @@ -58,20 +57,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1i8(i8* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i8: @@ -93,20 +91,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i8: @@ -129,20 +126,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) %1 = 
extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i8: @@ -166,20 +162,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i8: @@ -204,20 +199,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i8: @@ -243,20 +237,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i8: @@ -283,20 +276,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: 
vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv16i8(i8* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i32, i32) define @test_vlseg2_nxv16i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16i8: @@ -317,20 +309,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv16i8(i8* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv16i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv16i8: @@ -352,20 +343,19 @@ ; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv16i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv16i8: @@ -388,20 +378,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i32, i32) define @test_vlseg2_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i32: @@ -422,20 +411,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; 
CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i32(i32* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i32, i32) define @test_vlseg3_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i32: @@ -457,20 +445,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i32, i32) define @test_vlseg4_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i32: @@ -493,20 +480,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i32, i32) define @test_vlseg5_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i32: @@ -530,20 +516,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i32, i32) define @test_vlseg6_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: 
test_vlseg6_nxv2i32: @@ -568,20 +553,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i32: @@ -607,20 +591,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i32: @@ -647,20 +630,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4i16(i16* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i32, i32) define @test_vlseg2_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i16: @@ -681,20 +663,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 
} declare {,,} @llvm.riscv.vlseg3.nxv4i16(i16* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i16: @@ -716,20 +697,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i16: @@ -752,20 +732,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i32, i32) define @test_vlseg5_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i16: @@ -789,20 +768,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i32, i32) define @test_vlseg6_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i16: @@ -827,20 +805,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call 
{,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i16: @@ -866,20 +843,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i16: @@ -906,20 +882,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i32, i32) define @test_vlseg2_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i32: @@ -940,20 +915,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1i32(i32* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i32, i32) define @test_vlseg3_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i32: @@ -975,20 +949,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i32, i32) define @test_vlseg4_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i32: @@ -1011,20 +984,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i32, i32) define @test_vlseg5_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i32: @@ -1048,20 +1020,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i32, i32) define @test_vlseg6_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i32: @@ -1086,20 +1057,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i32: @@ -1125,20 +1095,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v 
v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i32: @@ -1165,20 +1134,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8i16(i16* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i32, i32) define @test_vlseg2_nxv8i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i16: @@ -1199,20 +1167,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv8i16(i16* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv8i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i16: @@ -1234,20 +1201,19 @@ ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i32) +declare {,,,} 
@llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv8i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i16: @@ -1270,20 +1236,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8i8(i8* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i32, i32) define @test_vlseg2_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i8: @@ -1304,20 +1269,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv8i8(i8* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i8: @@ -1339,20 +1303,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i8: @@ -1375,20 +1338,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i32) 
+declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv8i8: @@ -1412,20 +1374,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv8i8: @@ -1450,20 +1411,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv8i8: @@ -1489,20 +1449,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv8i8: @@ -1529,20 +1488,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i32, i32) define @test_vlseg2_nxv8i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i32: @@ -1563,20 +1521,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4i8(i8* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i32, i32) define @test_vlseg2_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i8: @@ -1597,20 +1554,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4i8(i8* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i8: @@ -1632,20 +1588,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i8: @@ -1668,20 +1623,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = 
tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i8: @@ -1705,20 +1659,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i8: @@ -1743,20 +1696,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i8: @@ -1782,20 +1734,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i8: @@ -1822,20 +1773,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1i16(i16* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i32, i32) define @test_vlseg2_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i16: @@ -1856,20 +1806,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1i16(i16* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i16: @@ -1891,20 +1840,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i16: @@ -1927,20 +1875,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i32, i32) define @test_vlseg5_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i16: @@ -1964,20 +1911,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: 
vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i32, i32) define @test_vlseg6_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i16: @@ -2002,20 +1948,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i16: @@ -2041,20 +1986,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i16: @@ -2081,20 +2025,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv32i8(i8* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i32) +declare {,} 
@llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i32, i32) define @test_vlseg2_nxv32i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv32i8: @@ -2115,20 +2058,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu ; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i8(i8* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i32, i32) define @test_vlseg2_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i8: @@ -2149,20 +2091,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i8(i8* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i8: @@ -2184,20 +2125,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i8: @@ -2220,20 +2160,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i32) +declare {,,,,} 
@llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i8: @@ -2257,20 +2196,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i8: @@ -2295,20 +2233,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i8: @@ -2334,20 +2271,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i8: @@ -2374,20 +2310,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i16(i16* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i32, i32) define @test_vlseg2_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i16: @@ -2408,20 +2343,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i16(i16* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i16: @@ -2443,20 +2377,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i16: @@ -2479,20 +2412,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i32, i32) define @test_vlseg5_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i16: @@ -2516,20 +2448,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i32, i32) define @test_vlseg6_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i16: @@ -2554,20 +2485,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i16: @@ -2593,20 +2523,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i16: @@ -2633,20 +2562,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i32, i32) define @test_vlseg2_nxv4i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i32: @@ -2667,20 +2595,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; 
CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i32, i32) define @test_vlseg3_nxv4i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i32: @@ -2702,20 +2629,19 @@ ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i32, i32) define @test_vlseg4_nxv4i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i32: @@ -2738,20 +2664,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv16f16(half* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i32, i32) define @test_vlseg2_nxv16f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16f16: @@ -2772,20 +2697,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4f64(double* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i32, i32) define @test_vlseg2_nxv4f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f64: @@ 
-2806,20 +2730,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1f64(double* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i32, i32) define @test_vlseg2_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f64: @@ -2840,20 +2763,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1f64(double* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i32, i32) define @test_vlseg3_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f64: @@ -2875,20 +2797,19 @@ ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1f64(double* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i32, i32) define @test_vlseg4_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f64: @@ -2911,20 +2832,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i32) +declare {,,,,} 
@llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i32, i32) define @test_vlseg5_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f64: @@ -2948,20 +2868,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i32, i32) define @test_vlseg6_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f64: @@ -2986,20 +2905,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i32, i32) define @test_vlseg7_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f64: @@ -3025,20 +2943,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i32, i32) define @test_vlseg8_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f64: @@ -3065,20 +2982,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2f32(float* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i32, i32) define @test_vlseg2_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f32: @@ -3099,20 +3015,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2f32(float* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i32, i32) define @test_vlseg3_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f32: @@ -3134,20 +3049,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2f32(float* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i32, i32) define @test_vlseg4_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f32: @@ -3170,20 +3084,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i32, i32) define @test_vlseg5_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f32: @@ -3207,20 +3120,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i32, i32) define @test_vlseg6_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f32: @@ -3245,20 +3157,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i32, i32) define @test_vlseg7_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f32: @@ -3284,20 +3195,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f32: @@ -3324,20 +3234,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1f16(half* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, 
, i32, i32) define @test_vlseg2_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f16: @@ -3358,20 +3267,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1f16(half* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i32, i32) define @test_vlseg3_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f16: @@ -3393,20 +3301,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1f16(half* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f16: @@ -3429,20 +3336,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i32, i32) define @test_vlseg5_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f16: @@ -3466,20 +3372,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* , i32) 
-declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i32, i32) define @test_vlseg6_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f16: @@ -3504,20 +3409,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i32, i32) define @test_vlseg7_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f16: @@ -3543,20 +3447,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f16: @@ -3583,20 +3486,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1f32(float* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i32, i32) define @test_vlseg2_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f32: @@ -3617,20 +3519,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 
%vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1f32(float* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i32, i32) define @test_vlseg3_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f32: @@ -3652,20 +3553,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1f32(float* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i32, i32) define @test_vlseg4_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f32: @@ -3688,20 +3588,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i32, i32) define @test_vlseg5_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f32: @@ -3725,20 +3624,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i32, i32) define @test_vlseg6_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f32: @@ -3763,20 +3661,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i32, i32) define @test_vlseg7_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f32: @@ -3802,20 +3699,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f32: @@ -3842,20 +3738,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8f16(half* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i32, i32) define @test_vlseg2_nxv8f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f16: @@ -3876,20 +3771,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv8f16(half* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, 
half*, , i32, i32) define @test_vlseg3_nxv8f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8f16: @@ -3911,20 +3805,19 @@ ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv8f16(half* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv8f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8f16: @@ -3947,20 +3840,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8f32(float* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i32, i32) define @test_vlseg2_nxv8f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f32: @@ -3981,20 +3873,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2f64(double* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i32, i32) define @test_vlseg2_nxv2f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f64: @@ -4015,20 +3906,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} 
@llvm.riscv.vlseg3.nxv2f64(double* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i32, i32) define @test_vlseg3_nxv2f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f64: @@ -4050,20 +3940,19 @@ ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2f64(double* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i32, i32) define @test_vlseg4_nxv2f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f64: @@ -4086,20 +3975,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4f16(half* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i32, i32) define @test_vlseg2_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f16: @@ -4120,20 +4008,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4f16(half* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i32, i32) define @test_vlseg3_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f16: @@ -4155,20 +4042,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,} 
@llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4f16(half* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f16: @@ -4191,20 +4077,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i32, i32) define @test_vlseg5_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4f16: @@ -4228,20 +4113,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i32, i32) define @test_vlseg6_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4f16: @@ -4266,20 +4150,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i32, i32) define @test_vlseg7_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4f16: @@ -4305,20 +4188,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* , i32) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4f16: @@ -4345,20 +4227,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2f16(half* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i32, i32) define @test_vlseg2_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f16: @@ -4379,20 +4260,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2f16(half* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i32, i32) define @test_vlseg3_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f16: @@ -4414,20 +4294,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2f16(half* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f16: @@ -4450,20 +4329,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; 
CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* , i32) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i32) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i32, i32) define @test_vlseg5_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f16: @@ -4487,20 +4365,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* , i32) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i32) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i32, i32) define @test_vlseg6_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f16: @@ -4525,20 +4402,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* , i32) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i32, i32) define @test_vlseg7_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f16: @@ -4564,20 +4440,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* , i32) -declare {,,,,,,,} 
@llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f16: @@ -4604,20 +4479,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4f32(float* , i32) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i32) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i32, i32) define @test_vlseg2_nxv4f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f32: @@ -4638,20 +4512,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4f32(float* , i32) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i32) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i32, i32) define @test_vlseg3_nxv4f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f32: @@ -4673,20 +4546,19 @@ ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4f32(float* , i32) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i32) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i32, i32) define @test_vlseg4_nxv4f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f32: @@ -4709,14 +4581,13 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i32 
%vl)
+ %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1)
 %3 = extractvalue {,,,} %2, 1
 ret %3
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
@@ -3,7 +3,7 @@
; RUN: -verify-machineinstrs < %s | FileCheck %s

declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i64)
-declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64)
+declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64, i64)

define @test_vlseg2_nxv16i16(i16* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg2_nxv16i16:
@@ -24,20 +24,19 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
 %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
 %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 %vl)
+ %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,} %2, 1
 ret %3
}

declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i64)
-declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i64)
+declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i64, i64)

define @test_vlseg2_nxv4i32(i32* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg2_nxv4i32:
@@ -58,20 +57,19 @@
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
 %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl)
 %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i64 %vl)
+ %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,} %2, 1
 ret %3
}

declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i64)
-declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i64)
+declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i64, i64)

define @test_vlseg3_nxv4i32(i32* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg3_nxv4i32:
@@ -93,20 +91,19 @@
; CHECK-NEXT: vlseg3e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
 %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl)
 %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i64 %vl)
+ %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,,} %2, 1
 ret %3
}

declare {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* , i64)
-declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i64)
+declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i64, i64)

define @test_vlseg4_nxv4i32(i32* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg4_nxv4i32:
@@ -129,20 +126,19 @@
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
 %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl)
 %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl)
+ %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,,,} %2, 1
 ret %3
}

declare {,} @llvm.riscv.vlseg2.nxv16i8(i8* , i64)
-declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i64)
+declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i64, i64)

define @test_vlseg2_nxv16i8(i8* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg2_nxv16i8:
@@ -163,20 +159,19 @@
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg2e8.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
 %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl)
 %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i64 %vl)
+ %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,} %2, 1
 ret %3
}

declare {,,} @llvm.riscv.vlseg3.nxv16i8(i8* , i64)
-declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i64)
+declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i64, i64)

define @test_vlseg3_nxv16i8(i8* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg3_nxv16i8:
@@ -198,20 +193,19 @@
; CHECK-NEXT: vlseg3e8.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
 %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl)
 %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i64 %vl)
+ %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,,} %2, 1
 ret %3
}

declare {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* , i64)
-declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i64)
+declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i64, i64)

define @test_vlseg4_nxv16i8(i8* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg4_nxv16i8:
@@ -234,20 +228,19 @@
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
 %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl)
 %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl)
+ %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1)
 %3 = extractvalue {,,,} %2, 1
 ret %3
}

declare {,} @llvm.riscv.vlseg2.nxv1i64(i64* , i64)
-declare {,} @llvm.riscv.vlseg2.mask.nxv1i64(,, i64*, , i64)
+declare {,} @llvm.riscv.vlseg2.mask.nxv1i64(,, i64*, , i64, i64)

define @test_vlseg2_nxv1i64(i64* %base, i64 %vl) {
; CHECK-LABEL: test_vlseg2_nxv1i64:
@@ -268,20 +261,19 @@
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg2e64.v v7, (a0)
;
CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i64( %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i64( %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1i64(i64* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i64(,,, i64*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i64(,,, i64*, , i64, i64) define @test_vlseg3_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i64: @@ -303,20 +295,19 @@ ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i64( %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i64( %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i64(,,,, i64*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i64(,,,, i64*, , i64, i64) define @test_vlseg4_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i64: @@ -339,20 +330,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64(,,,,, i64*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64(,,,,, i64*, , i64, i64) define @test_vlseg5_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i64: @@ -376,20 +366,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64(,,,,,, i64*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64(,,,,,, i64*, , i64, i64) define @test_vlseg6_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i64: @@ -414,20 +403,19 
@@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64(,,,,,,, i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64(,,,,,,, i64*, , i64, i64) define @test_vlseg7_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i64: @@ -453,20 +441,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64(,,,,,,,, i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64(,,,,,,,, i64*, , i64, i64) define @test_vlseg8_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i64: @@ -493,20 +480,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i64, i64) define @test_vlseg2_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i32: @@ -527,20 +513,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} 
@llvm.riscv.vlseg3.nxv1i32(i32* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i64, i64) define @test_vlseg3_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i32: @@ -562,20 +547,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i64, i64) define @test_vlseg4_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i32: @@ -598,20 +582,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i64, i64) define @test_vlseg5_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i32: @@ -635,20 +618,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i64, i64) define @test_vlseg6_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i32: @@ -673,20 +655,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} 
@llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i32: @@ -712,20 +693,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i32: @@ -752,20 +732,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8i16(i16* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i64, i64) define @test_vlseg2_nxv8i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i16: @@ -786,20 +765,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv8i16(i16* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv8i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i16: @@ -821,20 +799,19 @@ ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed 
$v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv8i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i16: @@ -857,20 +834,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4i8(i8* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i64, i64) define @test_vlseg2_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i8: @@ -891,20 +867,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4i8(i8* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i8: @@ -926,20 +901,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i8: @@ -962,20 +936,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i8: @@ -999,20 +972,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i8: @@ -1037,20 +1009,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i8: @@ -1076,20 +1047,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i8: @@ -1116,20 +1086,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; 
CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1i16(i16* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i64, i64) define @test_vlseg2_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i16: @@ -1150,20 +1119,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1i16(i16* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i16: @@ -1185,20 +1153,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i16: @@ -1221,20 +1188,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i64, i64) define 
@test_vlseg5_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i16: @@ -1258,20 +1224,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i64, i64) define @test_vlseg6_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i16: @@ -1296,20 +1261,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i16: @@ -1335,20 +1299,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i16: @@ -1375,20 +1338,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i64, i64) define @test_vlseg2_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i32: @@ -1409,20 +1371,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i32(i32* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i64, i64) define @test_vlseg3_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i32: @@ -1444,20 +1405,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i64, i64) define @test_vlseg4_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i32: @@ -1480,20 +1440,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i64, i64) define @test_vlseg5_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i32: @@ -1517,20 +1476,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i64, i64) define @test_vlseg6_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i32: @@ -1555,20 +1513,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i32: @@ -1594,20 +1551,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i32: @@ -1634,20 +1590,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8i8(i8* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i64, i64) define @test_vlseg2_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i8: @@ -1668,20 +1623,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v 
v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv8i8(i8* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i8: @@ -1703,20 +1657,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i8: @@ -1739,20 +1692,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv8i8: @@ -1776,20 +1728,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv8i8: @@ -1814,20 +1765,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: 
vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv8i8: @@ -1853,20 +1803,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv8i8: @@ -1893,20 +1842,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4i64(i64* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i64(,, i64*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4i64(,, i64*, , i64, i64) define @test_vlseg2_nxv4i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i64: @@ -1927,20 +1875,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i64( %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i64( %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4i16(i16* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i64) +declare {,} 
@llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i64, i64) define @test_vlseg2_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i16: @@ -1961,20 +1908,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4i16(i16* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i16: @@ -1996,20 +1942,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i16: @@ -2032,20 +1977,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i64, i64) define @test_vlseg5_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i16: @@ -2069,20 +2013,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* 
, i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i64, i64) define @test_vlseg6_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i16: @@ -2107,20 +2050,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i16: @@ -2146,20 +2088,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i16: @@ -2186,20 +2127,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1i8(i8* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i64, i64) define @test_vlseg2_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i8: @@ -2220,20 +2160,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = 
tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1i8(i8* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i8: @@ -2255,20 +2194,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i8: @@ -2291,20 +2229,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i8: @@ -2328,20 +2265,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i8: @@ -2366,20 +2302,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) %1 = 
extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i8: @@ -2405,20 +2340,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i8: @@ -2445,20 +2379,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i8(i8* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i64, i64) define @test_vlseg2_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i8: @@ -2479,20 +2412,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i8(i8* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i8: @@ -2514,20 +2446,19 @@ ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; 
CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i8: @@ -2550,20 +2481,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i8: @@ -2587,20 +2517,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i8: @@ -2625,20 +2554,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i8: @@ -2664,20 +2592,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: 
vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i8: @@ -2704,20 +2631,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i64, i64) define @test_vlseg2_nxv8i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i32: @@ -2738,20 +2664,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv32i8(i8* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i64, i64) define @test_vlseg2_nxv32i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv32i8: @@ -2772,20 +2697,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu ; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i16(i16* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, 
, i64, i64) define @test_vlseg2_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i16: @@ -2806,20 +2730,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i16(i16* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i16: @@ -2841,20 +2764,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i16: @@ -2877,20 +2799,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i64, i64) define @test_vlseg5_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i16: @@ -2914,20 +2835,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* , i64) -declare {,,,,,} 
@llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i64, i64) define @test_vlseg6_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i16: @@ -2952,20 +2872,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i16: @@ -2991,20 +2910,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i16: @@ -3031,20 +2949,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2i64(i64* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2i64(,, i64*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2i64(,, i64*, , i64, i64) define @test_vlseg2_nxv2i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i64: @@ -3065,20 +2982,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = 
tail call {,} @llvm.riscv.vlseg2.mask.nxv2i64( %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i64( %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2i64(i64* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2i64(,,, i64*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2i64(,,, i64*, , i64, i64) define @test_vlseg3_nxv2i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i64: @@ -3100,20 +3016,19 @@ ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i64( %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i64( %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i64(,,,, i64*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i64(,,,, i64*, , i64, i64) define @test_vlseg4_nxv2i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i64: @@ -3136,20 +3051,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv16f16(half* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i64, i64) define @test_vlseg2_nxv16f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16f16: @@ -3170,20 +3084,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4f64(double* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i64, i64) define @test_vlseg2_nxv4f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f64: @@ -3204,20 +3117,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret 
entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1f64(double* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i64, i64) define @test_vlseg2_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f64: @@ -3238,20 +3150,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1f64(double* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i64, i64) define @test_vlseg3_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f64: @@ -3273,20 +3184,19 @@ ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1f64(double* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i64, i64) define @test_vlseg4_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f64: @@ -3309,20 +3219,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i64, i64) define @test_vlseg5_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f64: @@ -3346,20 +3255,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; 
CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i64, i64) define @test_vlseg6_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f64: @@ -3384,20 +3292,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i64, i64) define @test_vlseg7_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f64: @@ -3423,20 +3330,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i64, i64) define @test_vlseg8_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f64: @@ -3463,20 +3369,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2f32(float* , i64) 
-declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i64, i64) define @test_vlseg2_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f32: @@ -3497,20 +3402,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2f32(float* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i64, i64) define @test_vlseg3_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f32: @@ -3532,20 +3436,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2f32(float* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i64, i64) define @test_vlseg4_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f32: @@ -3568,20 +3471,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i64, i64) define @test_vlseg5_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f32: @@ -3605,20 +3507,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, 
%1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i64, i64) define @test_vlseg6_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f32: @@ -3643,20 +3544,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i64, i64) define @test_vlseg7_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f32: @@ -3682,20 +3582,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f32: @@ -3722,20 +3621,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1f16(half* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i64, i64) define @test_vlseg2_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f16: @@ -3756,20 +3654,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), 
v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1f16(half* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i64, i64) define @test_vlseg3_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f16: @@ -3791,20 +3688,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1f16(half* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f16: @@ -3827,20 +3723,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i64, i64) define @test_vlseg5_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f16: @@ -3864,20 +3759,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i64, i64) define @test_vlseg6_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f16: @@ -3902,20 +3796,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; 
CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i64, i64) define @test_vlseg7_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f16: @@ -3941,20 +3834,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f16: @@ -3981,20 +3873,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv1f32(float* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i64, i64) define @test_vlseg2_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f32: @@ -4015,20 +3906,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv1f32(float* , i64) 
-declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i64, i64) define @test_vlseg3_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f32: @@ -4050,20 +3940,19 @@ ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv1f32(float* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i64, i64) define @test_vlseg4_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f32: @@ -4086,20 +3975,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i64, i64) define @test_vlseg5_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f32: @@ -4123,20 +4011,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i64, i64) define @test_vlseg6_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f32: @@ -4161,20 +4048,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail 
call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i64, i64) define @test_vlseg7_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f32: @@ -4200,20 +4086,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f32: @@ -4240,20 +4125,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8f16(half* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i64, i64) define @test_vlseg2_nxv8f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f16: @@ -4274,20 +4158,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv8f16(half* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i64, i64) define @test_vlseg3_nxv8f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8f16: @@ -4309,20 +4192,19 @@ ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; 
CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv8f16(half* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv8f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8f16: @@ -4345,20 +4227,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv8f32(float* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i64, i64) define @test_vlseg2_nxv8f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f32: @@ -4379,20 +4260,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2f64(double* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i64, i64) define @test_vlseg2_nxv2f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f64: @@ -4413,20 +4293,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2f64(double* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i64, i64) define @test_vlseg3_nxv2f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f64: @@ -4448,20 +4327,19 @@ ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: 
vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2f64(double* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i64, i64) define @test_vlseg4_nxv2f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f64: @@ -4484,20 +4362,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4f16(half* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i64, i64) define @test_vlseg2_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f16: @@ -4518,20 +4395,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4f16(half* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i64, i64) define @test_vlseg3_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f16: @@ -4553,20 +4429,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4f16(half* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv4f16(half* %base, i64 
%vl) { ; CHECK-LABEL: test_vlseg4_nxv4f16: @@ -4589,20 +4464,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i64, i64) define @test_vlseg5_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4f16: @@ -4626,20 +4500,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i64, i64) define @test_vlseg6_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4f16: @@ -4664,20 +4537,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i64, i64) define @test_vlseg7_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4f16: @@ -4703,20 +4575,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue 
{,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4f16: @@ -4743,20 +4614,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv2f16(half* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i64, i64) define @test_vlseg2_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f16: @@ -4777,20 +4647,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv2f16(half* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i64, i64) define @test_vlseg3_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f16: @@ -4812,20 +4681,19 @@ ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv2f16(half* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f16: @@ -4848,20 +4716,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* , i64) -declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i64) +declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i64, i64) define @test_vlseg5_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f16: @@ -4885,20 +4752,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* , i64) -declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i64) +declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i64, i64) define @test_vlseg6_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f16: @@ -4923,20 +4789,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* , i64) -declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i64, i64) define @test_vlseg7_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f16: @@ -4962,20 +4827,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* , i64) -declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f16: @@ -5002,20 +4866,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli 
zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlseg2.nxv4f32(float* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i64, i64) define @test_vlseg2_nxv4f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f32: @@ -5036,20 +4899,19 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlseg3.nxv4f32(float* , i64) -declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i64) +declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i64, i64) define @test_vlseg3_nxv4f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f32: @@ -5071,20 +4933,19 @@ ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlseg4.nxv4f32(float* , i64) -declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i64) +declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i64, i64) define @test_vlseg4_nxv4f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f32: @@ -5107,14 +4968,13 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -3,7 +3,7 @@
; RUN: -verify-machineinstrs < %s | FileCheck %s
declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32)
-declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32)
+declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32, i32)
define void @test_vlseg2ff_dead_value(i16* %base, i32 %vl, i32* %outvl) {
; CHECK-LABEL: test_vlseg2ff_dead_value:
@@ -25,13 +25,13 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl)
+ %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1)
%1 = extractvalue {,, i32} %0, 2
store i32 %1, i32* %outvl
ret void
@@ -54,12 +54,12 @@
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl)
+ %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1)
%1 = extractvalue {,, i32} %0, 1
ret %1
}
@@ -80,10 +80,10 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
- tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl)
+ tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
@@ -3,7 +3,7 @@
; RUN: -verify-machineinstrs < %s | FileCheck %s
declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32)
-declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32)
+declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32, i32)
define @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) {
; CHECK-LABEL: test_vlseg2ff_nxv16i16:
@@ -26,14 +26,14 @@
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl)
+ %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1)
%1 = extractvalue {,, i32} %0, 1
%2 = extractvalue {,, i32} %0, 2
store i32 %2, i32* %outvl
@@ -41,7 +41,7 @@
}
declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32)
-declare
{,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8: @@ -64,14 +64,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -79,7 +79,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8: @@ -103,14 +103,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -118,7 +118,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8: @@ -143,14 +143,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -158,7 +158,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8: @@ -184,14 +184,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), 
v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -199,7 +199,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8: @@ -226,14 +226,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -241,7 +241,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8: @@ -269,14 +269,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -284,7 +284,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8: @@ -313,14 +313,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, 
%val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -328,7 +328,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8: @@ -351,14 +351,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -366,7 +366,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8: @@ -390,14 +390,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -405,7 +405,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8(i8* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8: @@ -430,14 +430,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -445,7 +445,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i32) -declare {,, i32} 
@llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i32: @@ -468,14 +468,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -483,7 +483,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i32, i32) define @test_vlseg3ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32: @@ -507,14 +507,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -522,7 +522,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i32, i32) define @test_vlseg4ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32: @@ -547,14 +547,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -562,7 +562,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i32, i32) define @test_vlseg5ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i32: @@ -588,14 +588,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -603,7 +603,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i32, i32) define @test_vlseg6ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32: @@ -630,14 +630,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -645,7 +645,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32: @@ -673,14 +673,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -688,7 +688,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32: @@ -717,14 +717,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: 
- %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -732,7 +732,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16: @@ -755,14 +755,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -770,7 +770,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16: @@ -794,14 +794,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -809,7 +809,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i16: @@ -834,14 +834,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -849,7 +849,7 @@ } declare {,,,,, i32} 
@llvm.riscv.vlseg5ff.nxv4i16(i16* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i32, i32) define @test_vlseg5ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16: @@ -875,14 +875,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -890,7 +890,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i32, i32) define @test_vlseg6ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16: @@ -917,14 +917,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -932,7 +932,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i16: @@ -960,14 +960,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -975,7 +975,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i32, i32) define 
@test_vlseg8ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16: @@ -1004,14 +1004,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -1019,7 +1019,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32: @@ -1042,14 +1042,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1057,7 +1057,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i32, i32) define @test_vlseg3ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32: @@ -1081,14 +1081,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1096,7 +1096,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i32, i32) define @test_vlseg4ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32: @@ -1121,14 +1121,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, 
(a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1136,7 +1136,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i32, i32) define @test_vlseg5ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32: @@ -1162,14 +1162,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -1177,7 +1177,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i32, i32) define @test_vlseg6ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32: @@ -1204,14 +1204,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -1219,7 +1219,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32: @@ -1247,14 +1247,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, 
%val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -1262,7 +1262,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i32: @@ -1291,14 +1291,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -1306,7 +1306,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16: @@ -1329,14 +1329,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1344,7 +1344,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16: @@ -1368,14 +1368,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1383,7 +1383,7 @@ } 
declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16: @@ -1408,14 +1408,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1423,7 +1423,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8: @@ -1446,14 +1446,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1461,7 +1461,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8: @@ -1485,14 +1485,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1500,7 +1500,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(i8* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8: @@ -1525,14 +1525,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1540,7 +1540,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8: @@ -1566,14 +1566,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -1581,7 +1581,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8: @@ -1608,14 +1608,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -1623,7 +1623,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8: @@ -1651,14 +1651,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} 
@llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -1666,7 +1666,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8: @@ -1695,14 +1695,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -1710,7 +1710,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv8i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32: @@ -1733,14 +1733,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1748,7 +1748,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8: @@ -1771,14 +1771,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1786,7 
+1786,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8: @@ -1810,14 +1810,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1825,7 +1825,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(i8* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8: @@ -1850,14 +1850,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1865,7 +1865,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8: @@ -1891,14 +1891,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -1906,7 +1906,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8: @@ -1933,14 +1933,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, 
v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -1948,7 +1948,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8: @@ -1976,14 +1976,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -1991,7 +1991,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8: @@ -2020,14 +2020,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2035,7 +2035,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16: @@ -2058,14 +2058,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2073,7 +2073,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16: @@ -2097,14 +2097,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -2112,7 +2112,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16: @@ -2137,14 +2137,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -2152,7 +2152,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i32, i32) define @test_vlseg5ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16: @@ -2178,14 +2178,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, 
i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -2193,7 +2193,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i32, i32) define @test_vlseg6ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i16: @@ -2220,14 +2220,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -2235,7 +2235,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(i16* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16: @@ -2263,14 +2263,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -2278,7 +2278,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16: @@ -2307,14 +2307,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2322,7 +2322,7 @@ } declare {,, i32} 
@llvm.riscv.vlseg2ff.nxv32i8(i8* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8: @@ -2345,14 +2345,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2360,7 +2360,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8: @@ -2383,14 +2383,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2398,7 +2398,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8: @@ -2422,14 +2422,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -2437,7 +2437,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8: @@ -2462,14 +2462,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; 
CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -2477,7 +2477,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8: @@ -2503,14 +2503,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -2518,7 +2518,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8: @@ -2545,14 +2545,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -2560,7 +2560,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8: @@ -2588,14 +2588,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, 
%mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -2603,7 +2603,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8: @@ -2632,14 +2632,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2647,7 +2647,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i16: @@ -2670,14 +2670,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2685,7 +2685,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i16: @@ -2709,14 +2709,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -2724,7 +2724,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* , 
i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16: @@ -2749,14 +2749,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -2764,7 +2764,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i32, i32) define @test_vlseg5ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16: @@ -2790,14 +2790,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -2805,7 +2805,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, i16*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, i16*, , i32, i32) define @test_vlseg6ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16: @@ -2832,14 +2832,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -2847,7 +2847,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16: @@ 
-2875,14 +2875,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -2890,7 +2890,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16: @@ -2919,14 +2919,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2934,7 +2934,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32: @@ -2957,14 +2957,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2972,7 +2972,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i32, i32) define @test_vlseg3ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i32: @@ -2996,14 +2996,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v 
v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3011,7 +3011,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i32, i32) define @test_vlseg4ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32: @@ -3036,14 +3036,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3051,7 +3051,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv16f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16f16: @@ -3074,14 +3074,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3089,7 +3089,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i32, i32) define @test_vlseg2ff_nxv4f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64: @@ -3112,14 +3112,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i32 %vl, 
i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3127,7 +3127,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i32, i32) define @test_vlseg2ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64: @@ -3150,14 +3150,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3165,7 +3165,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i32, i32) define @test_vlseg3ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64: @@ -3189,14 +3189,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3204,7 +3204,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i32, i32) define @test_vlseg4ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64: @@ -3229,14 +3229,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3244,7 +3244,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i32, i32) define 
@test_vlseg5ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64: @@ -3270,14 +3270,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -3285,7 +3285,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i32, i32) define @test_vlseg6ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f64: @@ -3312,14 +3312,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -3327,7 +3327,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i32, i32) define @test_vlseg7ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64: @@ -3355,14 +3355,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -3370,7 +3370,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(double* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i32, i32) define @test_vlseg8ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64: @@ -3399,14 +3399,14 @@ ; CHECK-NEXT: 
vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -3414,7 +3414,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32: @@ -3437,14 +3437,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3452,7 +3452,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i32, i32) define @test_vlseg3ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f32: @@ -3476,14 +3476,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3491,7 +3491,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(float* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i32, i32) define @test_vlseg4ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32: @@ -3516,14 +3516,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3531,7 +3531,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i32, i32) define @test_vlseg5ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32: @@ -3557,14 +3557,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -3572,7 +3572,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i32, i32) define @test_vlseg6ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32: @@ -3599,14 +3599,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -3614,7 +3614,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i32, i32) define @test_vlseg7ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32: @@ -3642,14 +3642,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} 
@llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -3657,7 +3657,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(float* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32: @@ -3686,14 +3686,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -3701,7 +3701,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(half* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16: @@ -3724,14 +3724,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3739,7 +3739,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16: @@ -3763,14 +3763,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3778,7 +3778,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* , i32) 
-declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16: @@ -3803,14 +3803,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3818,7 +3818,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i32, i32) define @test_vlseg5ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16: @@ -3844,14 +3844,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -3859,7 +3859,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i32, i32) define @test_vlseg6ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16: @@ -3886,14 +3886,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -3901,7 +3901,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i32, i32) define @test_vlseg7ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: 
test_vlseg7ff_nxv1f16: @@ -3929,14 +3929,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -3944,7 +3944,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16: @@ -3973,14 +3973,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -3988,7 +3988,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f32: @@ -4011,14 +4011,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4026,7 +4026,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i32, i32) define @test_vlseg3ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32: @@ -4050,14 +4050,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4065,7 +4065,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32(float* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i32, i32) define @test_vlseg4ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32: @@ -4090,14 +4090,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4105,7 +4105,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i32, i32) define @test_vlseg5ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32: @@ -4131,14 +4131,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -4146,7 +4146,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i32, i32) define @test_vlseg6ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32: @@ -4173,14 +4173,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, 
%val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -4188,7 +4188,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i32, i32) define @test_vlseg7ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32: @@ -4216,14 +4216,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -4231,7 +4231,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32: @@ -4260,14 +4260,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -4275,7 +4275,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16: @@ -4298,14 +4298,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i32 %vl, 
i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4313,7 +4313,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16: @@ -4337,14 +4337,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4352,7 +4352,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16: @@ -4377,14 +4377,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4392,7 +4392,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv8f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32: @@ -4415,14 +4415,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4430,7 +4430,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i32, i32) define @test_vlseg2ff_nxv2f64(double* 
%base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64: @@ -4453,14 +4453,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4468,7 +4468,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i32, i32) define @test_vlseg3ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64: @@ -4492,14 +4492,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4507,7 +4507,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i32, i32) define @test_vlseg4ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64: @@ -4532,14 +4532,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4547,7 +4547,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(half* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16: @@ -4570,14 +4570,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr 
a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4585,7 +4585,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16: @@ -4609,14 +4609,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4624,7 +4624,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16: @@ -4649,14 +4649,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4664,7 +4664,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i32, i32) define @test_vlseg5ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16: @@ -4690,14 +4690,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} 
%0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -4705,7 +4705,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i32, i32) define @test_vlseg6ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16: @@ -4732,14 +4732,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -4747,7 +4747,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i32, i32) define @test_vlseg7ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16: @@ -4775,14 +4775,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -4790,7 +4790,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16: @@ -4819,14 +4819,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -4834,7 +4834,7 @@ } declare {,, i32} 
@llvm.riscv.vlseg2ff.nxv2f16(half* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16: @@ -4857,14 +4857,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4872,7 +4872,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(half* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16: @@ -4896,14 +4896,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4911,7 +4911,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16: @@ -4936,14 +4936,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4951,7 +4951,7 @@ } declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* , i32) -declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i32, i32) define @test_vlseg5ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16: @@ -4977,14 +4977,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: 
vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -4992,7 +4992,7 @@ } declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* , i32) -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i32, i32) define @test_vlseg6ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16: @@ -5019,14 +5019,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -5034,7 +5034,7 @@ } declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* , i32) -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i32, i32) define @test_vlseg7ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16: @@ -5062,14 +5062,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -5077,7 +5077,7 @@ } declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* , i32) -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16: @@ -5106,14 +5106,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -5121,7 +5121,7 @@ } declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* , i32) -declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32: @@ -5144,14 +5144,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -5159,7 +5159,7 @@ } declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* , i32) -declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i32, i32) define @test_vlseg3ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32: @@ -5183,14 +5183,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -5198,7 +5198,7 @@ } declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* , i32) -declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i32, i32) define @test_vlseg4ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32: @@ -5223,14 +5223,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) + %0 = tail call {,,,, i32} 
@llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) define void @test_vlseg2ff_dead_value(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: @@ -25,13 +25,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 2 store i64 %1, i64* %outvl ret void @@ -54,12 +54,12 @@ ; CHECK-LABEL: test_vlseg2ff_mask_dead_vl: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 ret %1 } @@ -80,10 +80,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: - tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl) + tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: @@ -26,14 +26,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} 
@llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -41,7 +41,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32: @@ -64,14 +64,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -79,7 +79,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i64, i64) define @test_vlseg3ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i32: @@ -103,14 +103,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -118,7 +118,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i64, i64) define @test_vlseg4ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32: @@ -143,14 +143,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -158,7 +158,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, 
, i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8: @@ -181,14 +181,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -196,7 +196,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8: @@ -220,14 +220,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -235,7 +235,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8: @@ -260,14 +260,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -275,7 +275,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64, i64) define @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i64: @@ -298,14 +298,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, 
(a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -313,7 +313,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(,,, i64*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(,,, i64*, , i64, i64) define @test_vlseg3ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i64: @@ -337,14 +337,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64( %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64( %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -352,7 +352,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(,,,, i64*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(,,,, i64*, , i64, i64) define @test_vlseg4ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i64: @@ -377,14 +377,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -392,7 +392,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(,,,,, i64*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(,,,,, i64*, , i64, i64) define @test_vlseg5ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i64: @@ -418,14 +418,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} 
%0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -433,7 +433,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(,,,,,, i64*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(,,,,,, i64*, , i64, i64) define @test_vlseg6ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i64: @@ -460,14 +460,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -475,7 +475,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(,,,,,,, i64*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(,,,,,,, i64*, , i64, i64) define @test_vlseg7ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i64: @@ -503,14 +503,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -518,7 +518,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(,,,,,,,, i64*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(,,,,,,,, i64*, , i64, i64) define @test_vlseg8ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i64: @@ -547,14 +547,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -562,7 +562,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* , 
i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32: @@ -585,14 +585,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -600,7 +600,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i64, i64) define @test_vlseg3ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32: @@ -624,14 +624,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -639,7 +639,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i64, i64) define @test_vlseg4ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32: @@ -664,14 +664,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -679,7 +679,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i64, i64) define @test_vlseg5ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32: @@ -705,14 +705,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -720,7 +720,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i64, i64) define @test_vlseg6ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32: @@ -747,14 +747,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -762,7 +762,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32: @@ -790,14 +790,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -805,7 +805,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i32: @@ -834,14 +834,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -849,7 +849,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16: @@ -872,14 +872,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -887,7 +887,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16: @@ -911,14 +911,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -926,7 +926,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16: @@ -951,14 +951,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue 
{,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -966,7 +966,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8: @@ -989,14 +989,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1004,7 +1004,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8: @@ -1028,14 +1028,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1043,7 +1043,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8: @@ -1068,14 +1068,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1083,7 +1083,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8: @@ -1109,14 +1109,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: 
vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1124,7 +1124,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8: @@ -1151,14 +1151,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -1166,7 +1166,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8: @@ -1194,14 +1194,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -1209,7 +1209,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8: @@ -1238,14 +1238,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -1253,7 +1253,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16: @@ -1276,14 +1276,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1291,7 +1291,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16: @@ -1315,14 +1315,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1330,7 +1330,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16: @@ -1355,14 +1355,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = 
extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1370,7 +1370,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i64, i64) define @test_vlseg5ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16: @@ -1396,14 +1396,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1411,7 +1411,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i64, i64) define @test_vlseg6ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i16: @@ -1438,14 +1438,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -1453,7 +1453,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16: @@ -1481,14 +1481,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -1496,7 +1496,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* , i64) -declare {,,,,,,,, i64} 
@llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16: @@ -1525,14 +1525,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -1540,7 +1540,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i32: @@ -1563,14 +1563,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1578,7 +1578,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i64, i64) define @test_vlseg3ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32: @@ -1602,14 +1602,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1617,7 +1617,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i64, i64) define @test_vlseg4ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32: @@ -1642,14 +1642,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: 
vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1657,7 +1657,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i64, i64) define @test_vlseg5ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i32: @@ -1683,14 +1683,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1698,7 +1698,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i64, i64) define @test_vlseg6ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32: @@ -1725,14 +1725,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -1740,7 +1740,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32: @@ -1768,14 +1768,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -1783,7 +1783,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(i32* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32: @@ -1812,14 +1812,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -1827,7 +1827,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8: @@ -1850,14 +1850,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1865,7 +1865,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8: @@ -1889,14 +1889,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, 
i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1904,7 +1904,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8: @@ -1929,14 +1929,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1944,7 +1944,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8: @@ -1970,14 +1970,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1985,7 +1985,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8: @@ -2012,14 +2012,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2027,7 +2027,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i64, i64) define 
@test_vlseg7ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8: @@ -2055,14 +2055,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2070,7 +2070,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8: @@ -2099,14 +2099,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -2114,7 +2114,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64, i64) define @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i64: @@ -2137,14 +2137,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2152,7 +2152,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16: @@ -2175,14 +2175,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2190,7 +2190,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16: @@ -2214,14 +2214,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -2229,7 +2229,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i16: @@ -2254,14 +2254,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -2269,7 +2269,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i64, i64) define @test_vlseg5ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16: @@ -2295,14 +2295,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16( 
%val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -2310,7 +2310,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i64, i64) define @test_vlseg6ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16: @@ -2337,14 +2337,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2352,7 +2352,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i16: @@ -2380,14 +2380,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2395,7 +2395,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16: @@ -2424,14 +2424,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 
8 store i64 %2, i64* %outvl @@ -2439,7 +2439,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8: @@ -2462,14 +2462,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2477,7 +2477,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8: @@ -2501,14 +2501,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -2516,7 +2516,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8: @@ -2541,14 +2541,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -2556,7 +2556,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8: @@ -2582,14 +2582,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -2597,7 +2597,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8: @@ -2624,14 +2624,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2639,7 +2639,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8: @@ -2667,14 +2667,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2682,7 +2682,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8: @@ -2711,14 +2711,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -2726,7 +2726,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8: @@ -2749,14 +2749,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2764,7 +2764,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(i8* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8: @@ -2788,14 +2788,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -2803,7 +2803,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8: @@ -2828,14 +2828,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -2843,7 +2843,7 
@@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8: @@ -2869,14 +2869,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -2884,7 +2884,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8: @@ -2911,14 +2911,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2926,7 +2926,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8: @@ -2954,14 +2954,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2969,7 +2969,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i64, i64) define 
@test_vlseg8ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8: @@ -2998,14 +2998,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -3013,7 +3013,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv8i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32: @@ -3036,14 +3036,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3051,7 +3051,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8: @@ -3074,14 +3074,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3089,7 +3089,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i16: @@ -3112,14 +3112,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3127,7 +3127,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i16: @@ -3151,14 +3151,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3166,7 +3166,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16: @@ -3191,14 +3191,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3206,7 +3206,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i64, i64) define @test_vlseg5ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16: @@ -3232,14 +3232,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, 
i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -3247,7 +3247,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, i16*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, i16*, , i64, i64) define @test_vlseg6ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16: @@ -3274,14 +3274,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -3289,7 +3289,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16: @@ -3317,14 +3317,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -3332,7 +3332,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16: @@ -3361,14 +3361,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -3376,7 +3376,7 @@ } declare {,, i64} 
@llvm.riscv.vlseg2ff.nxv2i64(i64* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64, i64) define @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i64: @@ -3399,14 +3399,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3414,7 +3414,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(,,, i64*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(,,, i64*, , i64, i64) define @test_vlseg3ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i64: @@ -3438,14 +3438,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64( %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64( %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3453,7 +3453,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(,,,, i64*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(,,,, i64*, , i64, i64) define @test_vlseg4ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i64: @@ -3478,14 +3478,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3493,7 +3493,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv16f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16f16: @@ -3516,14 +3516,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3531,7 +3531,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i64, i64) define @test_vlseg2ff_nxv4f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64: @@ -3554,14 +3554,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3569,7 +3569,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i64, i64) define @test_vlseg2ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64: @@ -3592,14 +3592,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3607,7 +3607,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i64, i64) define @test_vlseg3ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64: @@ -3631,14 +3631,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i64 %vl) + %0 
= tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3646,7 +3646,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i64, i64) define @test_vlseg4ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64: @@ -3671,14 +3671,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3686,7 +3686,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i64, i64) define @test_vlseg5ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64: @@ -3712,14 +3712,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -3727,7 +3727,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i64, i64) define @test_vlseg6ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f64: @@ -3754,14 +3754,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -3769,7 +3769,7 @@ } 
declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i64, i64) define @test_vlseg7ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64: @@ -3797,14 +3797,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -3812,7 +3812,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i64, i64) define @test_vlseg8ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64: @@ -3841,14 +3841,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -3856,7 +3856,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32: @@ -3879,14 +3879,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3894,7 +3894,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i64) +declare {,,, i64} 
@llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i64, i64) define @test_vlseg3ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f32: @@ -3918,14 +3918,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3933,7 +3933,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i64, i64) define @test_vlseg4ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32: @@ -3958,14 +3958,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3973,7 +3973,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i64, i64) define @test_vlseg5ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32: @@ -3999,14 +3999,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -4014,7 +4014,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i64, i64) define @test_vlseg6ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32: @@ -4041,14 +4041,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -4056,7 +4056,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i64, i64) define @test_vlseg7ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32: @@ -4084,14 +4084,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -4099,7 +4099,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32: @@ -4128,14 +4128,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -4143,7 +4143,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16: @@ -4166,14 +4166,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4181,7 +4181,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16: @@ -4205,14 +4205,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4220,7 +4220,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16: @@ -4245,14 +4245,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4260,7 +4260,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i64, i64) define @test_vlseg5ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16: @@ -4286,14 +4286,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = 
extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -4301,7 +4301,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i64, i64) define @test_vlseg6ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16: @@ -4328,14 +4328,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -4343,7 +4343,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i64, i64) define @test_vlseg7ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16: @@ -4371,14 +4371,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -4386,7 +4386,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16: @@ -4415,14 +4415,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -4430,7 
+4430,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f32: @@ -4453,14 +4453,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4468,7 +4468,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i64, i64) define @test_vlseg3ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32: @@ -4492,14 +4492,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4507,7 +4507,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i64, i64) define @test_vlseg4ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32: @@ -4532,14 +4532,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4547,7 +4547,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i64, i64) define @test_vlseg5ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32: @@ -4573,14 +4573,14 @@ ; CHECK-NEXT: vmv1r.v v9, 
v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -4588,7 +4588,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i64, i64) define @test_vlseg6ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32: @@ -4615,14 +4615,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -4630,7 +4630,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i64, i64) define @test_vlseg7ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32: @@ -4658,14 +4658,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -4673,7 +4673,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32: @@ -4702,14 +4702,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, mu ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -4717,7 +4717,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16: @@ -4740,14 +4740,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4755,7 +4755,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16: @@ -4779,14 +4779,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4794,7 +4794,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16: @@ -4819,14 +4819,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, 
%mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4834,7 +4834,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv8f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32: @@ -4857,14 +4857,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4872,7 +4872,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i64, i64) define @test_vlseg2ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64: @@ -4895,14 +4895,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4910,7 +4910,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i64, i64) define @test_vlseg3ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64: @@ -4934,14 +4934,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4949,7 +4949,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , 
i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i64, i64) define @test_vlseg4ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64: @@ -4974,14 +4974,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4989,7 +4989,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16: @@ -5012,14 +5012,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -5027,7 +5027,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16: @@ -5051,14 +5051,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -5066,7 +5066,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16: @@ -5091,14 +5091,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -5106,7 +5106,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i64, i64) define @test_vlseg5ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16: @@ -5132,14 +5132,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -5147,7 +5147,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i64, i64) define @test_vlseg6ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16: @@ -5174,14 +5174,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -5189,7 +5189,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i64, i64) define @test_vlseg7ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16: @@ -5217,14 +5217,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} 
@llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -5232,7 +5232,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16: @@ -5261,14 +5261,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -5276,7 +5276,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16: @@ -5299,14 +5299,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -5314,7 +5314,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16: @@ -5338,14 +5338,14 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = 
extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -5353,7 +5353,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16: @@ -5378,14 +5378,14 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -5393,7 +5393,7 @@ } declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* , i64) -declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i64, i64) define @test_vlseg5ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16: @@ -5419,14 +5419,14 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -5434,7 +5434,7 @@ } declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* , i64) -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i64, i64) define @test_vlseg6ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16: @@ -5461,14 +5461,14 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -5476,7 +5476,7 @@ } declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* , i64) -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i64) +declare {,,,,,,, i64} 
@llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i64, i64) define @test_vlseg7ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16: @@ -5504,14 +5504,14 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -5519,7 +5519,7 @@ } declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* , i64) -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16: @@ -5548,14 +5548,14 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -5563,7 +5563,7 @@ } declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32: @@ -5586,14 +5586,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -5601,7 +5601,7 @@ } declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* , i64) -declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i64, i64) define @test_vlseg3ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32: @@ -5625,14 +5625,14 @@ ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -5640,7 +5640,7 @@ } declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* , i64) -declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i64, i64) define @test_vlseg4ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32: @@ -5665,14 +5665,14 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vlsseg2.nxv16i16(i16*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv16i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16: @@ -24,20 +24,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i8(i8*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, i8*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8: @@ -58,20 +57,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i8(i8*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, i8*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8: @@ -93,20 +91,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, i8*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8: @@ -129,20 +126,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, i8*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8: @@ -166,20 +162,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, i8*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, i8*, i32, 
, i32, i32) define @test_vlsseg6_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8: @@ -204,20 +199,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, i8*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8: @@ -243,20 +237,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, i8*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8: @@ -283,20 +276,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv16i8(i8*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv16i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8: @@ -317,20 +309,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv16i8(i8*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv16i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8: @@ -352,20 +343,19 @@ ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv16i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8: @@ -388,20 +378,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i32(i32*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, i32*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32: @@ -422,20 +411,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i32(i32*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i32, , i32, i32) define @test_vlsseg3_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; 
CHECK-LABEL: test_vlsseg3_nxv2i32: @@ -457,20 +445,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i32, , i32, i32) define @test_vlsseg4_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32: @@ -493,20 +480,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i32, , i32, i32) define @test_vlsseg5_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32: @@ -530,20 +516,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i32, , i32, i32) define @test_vlsseg6_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32: @@ -568,20 +553,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, 
%mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg7_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32: @@ -607,20 +591,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg8_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32: @@ -647,20 +630,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4i16(i16*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16: @@ -681,20 +663,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4i16(i16*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv4i16(i16* %base, i32 %offset, i32 
%vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16: @@ -716,20 +697,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16: @@ -752,20 +732,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i32, , i32, i32) define @test_vlsseg5_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16: @@ -789,20 +768,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i32, , i32, i32) define @test_vlsseg6_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16: @@ -827,20 +805,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 
%offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg7_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16: @@ -866,20 +843,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg8_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16: @@ -906,20 +882,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i32(i32*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32: @@ -940,20 +915,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i32(i32*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i32, , i32, i32) define @test_vlsseg3_nxv1i32(i32* %base, i32 
%offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32: @@ -975,20 +949,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i32, , i32, i32) define @test_vlsseg4_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32: @@ -1011,20 +984,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i32, , i32, i32) define @test_vlsseg5_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32: @@ -1048,20 +1020,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i32, , i32, i32) define @test_vlsseg6_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32: @@ -1086,20 +1057,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, 
%1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg7_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32: @@ -1125,20 +1095,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg8_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32: @@ -1165,20 +1134,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8i16(i16*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv8i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16: @@ -1199,20 +1167,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv8i16(i16*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i32, , i32, i32) define 
@test_vlsseg3_nxv8i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16: @@ -1234,20 +1201,19 @@ ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv8i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16: @@ -1270,20 +1236,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8i8(i8*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8: @@ -1304,20 +1269,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv8i8(i8*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8: @@ -1339,20 +1303,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + 
%2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, i8*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8: @@ -1375,20 +1338,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, i8*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8: @@ -1412,20 +1374,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, i8*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8: @@ -1450,20 +1411,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, i8*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8: @@ -1489,20 +1449,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: 
vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, i8*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8: @@ -1529,20 +1488,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8i32(i32*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, i32*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv8i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32: @@ -1563,20 +1521,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4i8(i8*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8: @@ -1597,20 +1554,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, 
i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4i8(i8*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8: @@ -1632,20 +1588,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8: @@ -1668,20 +1623,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8: @@ -1705,20 +1659,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8: @@ -1743,20 +1696,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8: @@ -1782,20 +1734,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8: @@ -1822,20 +1773,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i16(i16*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16: @@ -1856,20 +1806,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} 
%2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i16(i16*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16: @@ -1891,20 +1840,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16: @@ -1927,20 +1875,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i32, , i32, i32) define @test_vlsseg5_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16: @@ -1964,20 +1911,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i32, , i32, i32) define @test_vlsseg6_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16: @@ -2002,20 +1948,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg7_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16: @@ -2041,20 +1986,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, i16*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg8_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16: @@ -2081,20 +2025,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv32i8(i8*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, i8*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv32i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8: @@ -2115,20 +2058,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 
1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i8(i8*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, i8*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8: @@ -2149,20 +2091,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i8(i8*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, i8*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8: @@ -2184,20 +2125,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, i8*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8: @@ -2220,20 +2160,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, i8*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8: @@ -2257,20 +2196,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, i8*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8: @@ -2295,20 +2233,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, i8*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8: @@ -2334,20 +2271,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, i8*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8: @@ -2374,20 +2310,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i16(i16*, 
i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, i16*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16: @@ -2408,20 +2343,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i16(i16*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, i16*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16: @@ -2443,20 +2377,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, i16*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16: @@ -2479,20 +2412,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, i16*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, i16*, i32, , i32, i32) define @test_vlsseg5_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16: @@ -2516,20 +2448,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i32, , i32, i32) define @test_vlsseg6_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16: @@ -2554,20 +2485,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg7_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16: @@ -2593,20 +2523,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg8_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16: @@ -2633,20 +2562,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } 
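The pattern in every hunk above is the same: each masked vlsseg declaration gains one trailing XLEN-sized operand, every call site passes 1 for it, and the separate "vsetvli zero, zero, ..., tu, mu" line drops out of the CHECK output. Because the scalable vector types inside the braces do not survive in this rendering of the diff, a written-out sketch of the nxv2i16 case is given below; the element types are reconstructed from the intrinsic name-mangling suffix (nxv2i16 data, i1 mask), the added operand is read as the policy immediate, and the exact spelling is illustrative rather than a literal line of the patch.

; Hedged sketch: updated masked strided segment-load intrinsic, RV32 (i32 XLEN).
; The final i32 is the added policy operand; passing 1 presumably selects a
; tail-agnostic policy, consistent with the removed tu,mu vsetvli CHECK lines.
declare {<vscale x 2 x i16>, <vscale x 2 x i16>}
  @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, ; merge operands
                                   i16*,               ; base pointer
                                   i32,                ; byte stride
                                   <vscale x 2 x i1>,  ; mask
                                   i32,                ; vl
                                   i32)                ; policy immediate (new trailing operand)

; Call-site shape used throughout these tests (policy immediate appended last):
  %2 = tail call {<vscale x 2 x i16>, <vscale x 2 x i16>}
       @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1, <vscale x 2 x i16> %1,
                                        i16* %base, i32 %offset,
                                        <vscale x 2 x i1> %mask, i32 %vl, i32 1)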
declare {,} @llvm.riscv.vlsseg2.nxv4i32(i32*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, i32*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv4i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32: @@ -2667,20 +2595,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4i32(i32*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, i32*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, i32*, i32, , i32, i32) define @test_vlsseg3_nxv4i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32: @@ -2702,20 +2629,19 @@ ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, i32*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, i32*, i32, , i32, i32) define @test_vlsseg4_nxv4i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32: @@ -2738,20 +2664,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv16f16(half*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, half*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv16f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16: @@ -2772,20 +2697,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 
killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4f64(double*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i32, , i32, i32) define @test_vlsseg2_nxv4f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64: @@ -2806,20 +2730,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1f64(double*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i32, , i32, i32) define @test_vlsseg2_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64: @@ -2840,20 +2763,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1f64(double*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i32, , i32, i32) define @test_vlsseg3_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64: @@ -2875,20 +2797,19 @@ ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(double*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i32, , 
i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i32, , i32, i32) define @test_vlsseg4_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64: @@ -2911,20 +2832,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i32, , i32, i32) define @test_vlsseg5_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f64: @@ -2948,20 +2868,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i32, , i32, i32) define @test_vlsseg6_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64: @@ -2986,20 +2905,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i32, , i32, i32) define @test_vlsseg7_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64: @@ -3025,20 +2943,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i32, , i32, i32) define @test_vlsseg8_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64: @@ -3065,20 +2982,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2f32(float*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32: @@ -3099,20 +3015,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2f32(float*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i32, , i32, i32) define @test_vlsseg3_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32: @@ -3134,20 +3049,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = 
extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(float*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i32, , i32, i32) define @test_vlsseg4_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32: @@ -3170,20 +3084,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i32, , i32, i32) define @test_vlsseg5_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32: @@ -3207,20 +3120,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i32, , i32, i32) define @test_vlsseg6_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32: @@ -3245,20 +3157,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i32, , i32, i32) define @test_vlsseg7_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32: @@ -3284,20 +3195,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, 
v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i32, , i32, i32) define @test_vlsseg8_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32: @@ -3324,20 +3234,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1f16(half*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16: @@ -3358,20 +3267,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1f16(half*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16: @@ -3393,20 +3301,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail 
call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(half*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16: @@ -3429,20 +3336,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i32, , i32, i32) define @test_vlsseg5_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16: @@ -3466,20 +3372,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i32, , i32, i32) define @test_vlsseg6_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16: @@ -3504,20 +3409,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i32, , i32, i32) define @test_vlsseg7_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16: @@ -3543,20 +3447,19 
@@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i32, , i32, i32) define @test_vlsseg8_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16: @@ -3583,20 +3486,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1f32(float*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32: @@ -3617,20 +3519,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1f32(float*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i32, , i32, i32) define @test_vlsseg3_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32: @@ -3652,20 +3553,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} 
@llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(float*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i32, , i32, i32) define @test_vlsseg4_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32: @@ -3688,20 +3588,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i32, , i32, i32) define @test_vlsseg5_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32: @@ -3725,20 +3624,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i32, , i32, i32) define @test_vlsseg6_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32: @@ -3763,20 +3661,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i32, , i32, i32) 
define @test_vlsseg7_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32: @@ -3802,20 +3699,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i32, , i32, i32) define @test_vlsseg8_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32: @@ -3842,20 +3738,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8f16(half*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv8f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16: @@ -3876,20 +3771,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv8f16(half*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv8f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16: @@ -3911,20 +3805,19 @@ ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(half*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv8f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16: @@ -3947,20 +3840,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8f32(float*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv8f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32: @@ -3981,20 +3873,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2f64(double*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i32, , i32, i32) define @test_vlsseg2_nxv2f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64: @@ -4015,20 +3906,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2f64(double*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i32, , i32) +declare {,,} 
@llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i32, , i32, i32) define @test_vlsseg3_nxv2f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64: @@ -4050,20 +3940,19 @@ ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(double*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i32, , i32, i32) define @test_vlsseg4_nxv2f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f64: @@ -4086,20 +3975,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4f16(half*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16: @@ -4120,20 +4008,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4f16(half*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16: @@ -4155,20 +4042,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, 
i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(half*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16: @@ -4191,20 +4077,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i32, , i32, i32) define @test_vlsseg5_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16: @@ -4228,20 +4113,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i32, , i32, i32) define @test_vlsseg6_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16: @@ -4266,20 +4150,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half*, i32, i32) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i32, , i32) +declare {,,,,,,} 
@llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i32, , i32, i32) define @test_vlsseg7_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16: @@ -4305,20 +4188,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, half*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, half*, i32, , i32, i32) define @test_vlsseg8_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4f16: @@ -4345,20 +4227,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2f16(half*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, half*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16: @@ -4379,20 +4260,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2f16(half*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, half*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16: @@ -4414,20 +4294,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(half*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, half*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16: @@ -4450,20 +4329,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half*, i32, i32) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, half*, i32, , i32) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, half*, i32, , i32, i32) define @test_vlsseg5_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16: @@ -4487,20 +4365,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half*, i32, i32) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, half*, i32, , i32) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, half*, i32, , i32, i32) define @test_vlsseg6_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16: @@ -4525,20 +4402,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half*, i32, i32) -declare {,,,,,,} 
@llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, half*, i32, , i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, half*, i32, , i32, i32) define @test_vlsseg7_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16: @@ -4564,20 +4440,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half*, i32, i32) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, half*, i32, , i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, half*, i32, , i32, i32) define @test_vlsseg8_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f16: @@ -4604,20 +4479,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4f32(float*, i32, i32) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, float*, i32, , i32) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv4f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32: @@ -4638,20 +4512,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4f32(float*, i32, i32) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, float*, i32, , i32) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, float*, i32, , i32, i32) define @test_vlsseg3_nxv4f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32: @@ -4673,20 +4546,19 @@ ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, 
mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(float*, i32, i32) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, float*, i32, , i32) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, float*, i32, , i32, i32) define @test_vlsseg4_nxv4f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32: @@ -4709,14 +4581,13 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv16i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16: @@ -24,20 +24,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4i32(i32*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, i32*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv4i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32: @@ -58,20 +57,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4i32(i32*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, i32*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, i32*, i64, , i64, i64) define @test_vlsseg3_nxv4i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32: @@ -93,20 +91,19 @@ ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, i32*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, i32*, i64, , i64, i64) define @test_vlsseg4_nxv4i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32: @@ -129,20 +126,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv16i8(i8*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv16i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8: @@ -163,20 +159,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv16i8(i8*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv16i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8: @@ -198,20 +193,19 @@ ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; 
CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv16i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8: @@ -234,20 +228,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i64(i64*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i64(,, i64*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i64(,, i64*, i64, , i64, i64) define @test_vlsseg2_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i64: @@ -268,20 +261,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i64(i64*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i64(,,, i64*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i64(,,, i64*, i64, , i64, i64) define @test_vlsseg3_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i64: @@ -303,20 +295,19 @@ ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret 
%3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64(,,,, i64*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64(,,,, i64*, i64, , i64, i64) define @test_vlsseg4_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i64: @@ -339,20 +330,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64(,,,,, i64*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64(,,,,, i64*, i64, , i64, i64) define @test_vlsseg5_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i64: @@ -376,20 +366,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64(,,,,,, i64*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64(,,,,,, i64*, i64, , i64, i64) define @test_vlsseg6_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i64: @@ -414,20 +403,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64(,,,,,,, i64*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64(,,,,,,, i64*, i64, , i64, i64) define @test_vlsseg7_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i64: @@ -453,20 +441,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg7e64.v v7, 
(a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64(,,,,,,,, i64*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64(,,,,,,,, i64*, i64, , i64, i64) define @test_vlsseg8_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i64: @@ -493,20 +480,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i32(i32*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32: @@ -527,20 +513,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i32(i32*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i64, , i64, i64) define @test_vlsseg3_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32: @@ -562,20 +547,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue 
{,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i64, , i64, i64) define @test_vlsseg4_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32: @@ -598,20 +582,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i64, , i64, i64) define @test_vlsseg5_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32: @@ -635,20 +618,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i64, , i64, i64) define @test_vlsseg6_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32: @@ -673,20 +655,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg7_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32: @@ -712,20 +693,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; 
CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg8_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32: @@ -752,20 +732,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8i16(i16*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv8i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16: @@ -786,20 +765,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv8i16(i16*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv8i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16: @@ -821,20 +799,19 @@ ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i64 
%offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv8i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16: @@ -857,20 +834,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4i8(i8*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8: @@ -891,20 +867,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4i8(i8*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8: @@ -926,20 +901,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8: @@ -962,20 +936,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 
; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8: @@ -999,20 +972,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8: @@ -1037,20 +1009,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8: @@ -1076,20 +1047,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8*, i64, i64) -declare {,,,,,,,} 
@llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8: @@ -1116,20 +1086,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i16(i16*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16: @@ -1150,20 +1119,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i16(i16*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16: @@ -1185,20 +1153,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16: @@ -1221,20 +1188,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i64, , i64, i64) define @test_vlsseg5_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16: @@ -1258,20 +1224,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i64, , i64, i64) define @test_vlsseg6_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16: @@ -1296,20 +1261,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg7_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16: @@ -1335,20 +1299,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16*, i64, i64) 
-declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, i16*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg8_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16: @@ -1375,20 +1338,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i32(i32*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, i32*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32: @@ -1409,20 +1371,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i32(i32*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i64, , i64, i64) define @test_vlsseg3_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32: @@ -1444,20 +1405,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i64, , i64, i64) define @test_vlsseg4_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32: @@ -1480,20 +1440,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i64, , i64, i64) define @test_vlsseg5_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32: @@ -1517,20 +1476,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i64, , i64, i64) define @test_vlsseg6_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32: @@ -1555,20 +1513,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg7_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32: @@ -1594,20 +1551,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} 
@llvm.riscv.vlsseg8.nxv2i32(i32*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg8_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32: @@ -1634,20 +1590,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8i8(i8*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8: @@ -1668,20 +1623,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv8i8(i8*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8: @@ -1703,20 +1657,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, i8*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8: @@ -1739,20 +1692,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, i8*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8: @@ -1776,20 +1728,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, i8*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8: @@ -1814,20 +1765,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, i8*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8: @@ -1853,20 +1803,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8*, i64, i64) -declare {,,,,,,,} 
@llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, i8*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8: @@ -1893,20 +1842,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4i64(i64*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i64(,, i64*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i64(,, i64*, i64, , i64, i64) define @test_vlsseg2_nxv4i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i64: @@ -1927,20 +1875,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4i16(i16*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16: @@ -1961,20 +1908,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4i16(i16*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16: @@ -1996,20 +1942,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16: @@ -2032,20 +1977,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i64, , i64, i64) define @test_vlsseg5_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16: @@ -2069,20 +2013,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i64, , i64, i64) define @test_vlsseg6_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16: @@ -2107,20 +2050,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i64, , i64) +declare 
{,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg7_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16: @@ -2146,20 +2088,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg8_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16: @@ -2186,20 +2127,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1i8(i8*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, i8*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8: @@ -2220,20 +2160,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1i8(i8*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, i8*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8: @@ -2255,20 +2194,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, i8*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8: @@ -2291,20 +2229,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, i8*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8: @@ -2328,20 +2265,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, i8*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8: @@ -2366,20 +2302,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, i8*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, i8*, i64, , 
i64, i64) define @test_vlsseg7_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8: @@ -2405,20 +2340,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, i8*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8: @@ -2445,20 +2379,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i8(i8*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, i8*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8: @@ -2479,20 +2412,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i8(i8*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, i8*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8: @@ -2514,20 +2446,19 @@ ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue 
{,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, i8*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8: @@ -2550,20 +2481,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, i8*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8: @@ -2587,20 +2517,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, i8*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8: @@ -2625,20 +2554,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, i8*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg7_nxv2i8: @@ -2664,20 +2592,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, i8*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8: @@ -2704,20 +2631,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8i32(i32*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, i32*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv8i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32: @@ -2738,20 +2664,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv32i8(i8*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, i8*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv32i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8: @@ -2772,20 +2697,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i16(i16*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, i16*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16: @@ -2806,20 +2730,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i16(i16*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, i16*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16: @@ -2841,20 +2764,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, i16*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16: @@ -2877,20 +2799,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, i16*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, i16*, i64, , i64, i64) define @test_vlsseg5_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16: @@ -2914,20 +2835,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; 
CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i64, , i64, i64) define @test_vlsseg6_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16: @@ -2952,20 +2872,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg7_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16: @@ -2991,20 +2910,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg8_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16: @@ -3031,20 +2949,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2i64(i64*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2i64(,, i64*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2i64(,, i64*, i64, , i64, i64) define @test_vlsseg2_nxv2i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i64: @@ -3065,20 +2982,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2i64(i64*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i64(,,, i64*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i64(,,, i64*, i64, , i64, i64) define @test_vlsseg3_nxv2i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i64: @@ -3100,20 +3016,19 @@ ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64(,,,, i64*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64(,,,, i64*, i64, , i64, i64) define @test_vlsseg4_nxv2i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i64: @@ -3136,20 +3051,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv16f16(half*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, half*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv16f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg2_nxv16f16: @@ -3170,20 +3084,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4f64(double*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i64, , i64, i64) define @test_vlsseg2_nxv4f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64: @@ -3204,20 +3117,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1f64(double*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i64, , i64, i64) define @test_vlsseg2_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64: @@ -3238,20 +3150,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1f64(double*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i64, , i64, i64) define @test_vlsseg3_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64: @@ -3273,20 +3184,19 @@ ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 
%vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(double*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i64, , i64, i64) define @test_vlsseg4_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64: @@ -3309,20 +3219,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i64, , i64, i64) define @test_vlsseg5_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f64: @@ -3346,20 +3255,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i64, , i64, i64) define @test_vlsseg6_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64: @@ -3384,20 +3292,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i64, , i64, i64) define @test_vlsseg7_nxv1f64(double* %base, i64 %offset, i64 
%vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64: @@ -3423,20 +3330,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i64, , i64, i64) define @test_vlsseg8_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64: @@ -3463,20 +3369,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2f32(float*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32: @@ -3497,20 +3402,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2f32(float*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i64, , i64, i64) define @test_vlsseg3_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32: @@ -3532,20 +3436,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, 
i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(float*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i64, , i64, i64) define @test_vlsseg4_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32: @@ -3568,20 +3471,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i64, , i64, i64) define @test_vlsseg5_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32: @@ -3605,20 +3507,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i64, , i64, i64) define @test_vlsseg6_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32: @@ -3643,20 +3544,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i64, , i64) +declare 
{,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i64, , i64, i64) define @test_vlsseg7_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32: @@ -3682,20 +3582,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i64, , i64, i64) define @test_vlsseg8_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32: @@ -3722,20 +3621,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1f16(half*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16: @@ -3756,20 +3654,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1f16(half*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16: @@ -3791,20 +3688,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(half*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16: @@ -3827,20 +3723,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i64, , i64, i64) define @test_vlsseg5_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16: @@ -3864,20 +3759,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i64, , i64, i64) define @test_vlsseg6_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16: @@ -3902,20 +3796,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half*, i64, 
i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i64, , i64, i64) define @test_vlsseg7_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16: @@ -3941,20 +3834,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i64, , i64, i64) define @test_vlsseg8_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16: @@ -3981,20 +3873,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv1f32(float*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32: @@ -4015,20 +3906,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv1f32(float*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i64, , i64, i64) define @test_vlsseg3_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32: @@ -4050,20 +3940,19 @@ ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, 
zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(float*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i64, , i64, i64) define @test_vlsseg4_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32: @@ -4086,20 +3975,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i64, , i64, i64) define @test_vlsseg5_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32: @@ -4123,20 +4011,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i64, , i64, i64) define @test_vlsseg6_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32: @@ -4161,20 +4048,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, 
%mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i64, , i64, i64) define @test_vlsseg7_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32: @@ -4200,20 +4086,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i64, , i64, i64) define @test_vlsseg8_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32: @@ -4240,20 +4125,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8f16(half*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv8f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16: @@ -4274,20 +4158,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv8f16(half*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv8f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16: @@ -4309,20 +4192,19 @@ ; 
CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(half*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv8f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16: @@ -4345,20 +4227,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv8f32(float*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv8f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32: @@ -4379,20 +4260,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2f64(double*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i64, , i64, i64) define @test_vlsseg2_nxv2f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64: @@ -4413,20 +4293,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} 
@llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2f64(double*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i64, , i64, i64) define @test_vlsseg3_nxv2f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64: @@ -4448,20 +4327,19 @@ ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(double*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i64, , i64, i64) define @test_vlsseg4_nxv2f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f64: @@ -4484,20 +4362,19 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4f16(half*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16: @@ -4518,20 +4395,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4f16(half*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16: @@ -4553,20 +4429,19 @@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; 
CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(half*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16: @@ -4589,20 +4464,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i64, , i64, i64) define @test_vlsseg5_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16: @@ -4626,20 +4500,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i64, , i64, i64) define @test_vlsseg6_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16: @@ -4664,20 +4537,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, 
half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i64, , i64, i64) define @test_vlsseg7_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16: @@ -4703,20 +4575,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, half*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, half*, i64, , i64, i64) define @test_vlsseg8_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4f16: @@ -4743,20 +4614,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv2f16(half*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, half*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16: @@ -4777,20 +4647,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv2f16(half*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, half*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16: @@ -4812,20 +4681,19 
@@ ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(half*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, half*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16: @@ -4848,20 +4716,19 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half*, i64, i64) -declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, half*, i64, , i64) +declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, half*, i64, , i64, i64) define @test_vlsseg5_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16: @@ -4885,20 +4752,19 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half*, i64, i64) -declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, half*, i64, , i64) +declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, half*, i64, , i64, i64) define @test_vlsseg6_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16: @@ -4923,20 +4789,19 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 
= tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half*, i64, i64) -declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, half*, i64, , i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, half*, i64, , i64, i64) define @test_vlsseg7_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16: @@ -4962,20 +4827,19 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half*, i64, i64) -declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, half*, i64, , i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, half*, i64, , i64, i64) define @test_vlsseg8_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f16: @@ -5002,20 +4866,19 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv4f32(float*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, float*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv4f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32: @@ -5036,20 +4899,19 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,,} @llvm.riscv.vlsseg3.nxv4f32(float*, i64, i64) -declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, float*, i64, , i64) +declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, float*, i64, , i64, i64) define 
@test_vlsseg3_nxv4f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32: @@ -5071,20 +4933,19 @@ ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(float*, i64, i64) -declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, float*, i64, , i64) +declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, float*, i64, , i64, i64) define @test_vlsseg4_nxv4f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32: @@ -5107,14 +4968,13 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl) + %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll @@ -27,12 +27,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, 
i32 1) ret %a } @@ -522,12 +533,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -566,12 +578,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -580,7 +593,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -610,12 +623,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -654,12 +668,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -668,7 +683,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -698,12 +713,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -712,7 +728,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -743,12 +759,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -757,7 +774,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -788,12 +805,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -802,7 +820,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -833,12 +851,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -847,7 +866,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -878,12 +897,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +912,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -923,12 +943,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -937,7 +958,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -968,12 +989,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -982,7 +1004,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1013,12 +1035,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1058,12 +1081,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1072,7 +1096,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1102,12 +1126,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1116,7 +1141,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1146,12 +1171,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1160,7 +1186,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1190,12 +1216,13 @@ *, , , + i32, i32); define 
@intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1204,7 +1231,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1234,12 +1261,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1276,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1279,12 +1307,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1293,7 +1322,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1324,12 +1353,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1338,7 +1368,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1369,12 +1399,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1383,7 +1414,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1414,12 +1445,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1428,7 +1460,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1459,12 +1491,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1473,7 +1506,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1504,12 +1537,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1518,7 +1552,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1549,12 +1583,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1563,7 +1598,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1594,12 +1629,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1644,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1639,12 +1675,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1653,7 +1690,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1684,12 +1721,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1698,7 +1736,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1728,12 +1766,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1742,7 +1781,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1772,12 +1811,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1826,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1816,12 +1856,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1830,7 +1871,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1860,12 +1901,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { 
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1874,7 +1916,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1904,12 +1946,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1918,7 +1961,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1949,12 +1992,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1963,7 +2007,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1994,12 +2038,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2008,7 +2053,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2039,12 +2084,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2053,7 +2099,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2084,12 +2130,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2098,7 +2145,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2129,12 +2176,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2143,7 +2191,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2174,12 +2222,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ 
-2188,7 +2237,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2219,12 +2268,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2233,7 +2283,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2264,12 +2314,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2278,7 +2329,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2309,12 +2360,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2323,7 +2375,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2353,12 +2405,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2367,7 +2420,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2397,12 +2450,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2411,7 +2465,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2441,12 +2495,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2455,7 +2510,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2485,12 +2540,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2499,7 +2555,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2529,12 +2585,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2543,7 +2600,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2574,12 +2631,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2588,7 +2646,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2619,12 +2677,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2633,7 +2692,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2664,12 +2723,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2678,7 +2738,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2709,12 +2769,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2723,7 +2784,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2754,12 +2815,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2768,7 +2830,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2799,12 +2861,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2813,7 +2876,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2844,12 +2907,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2858,7 +2922,7 @@ * %1, %2, %3, - 
i32 %4) + i32 %4, i32 1) ret %a } @@ -2889,12 +2953,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2903,7 +2968,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2934,12 +2999,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2948,7 +3014,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2979,12 +3045,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2993,7 +3060,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3023,12 +3090,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3037,7 +3105,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3067,12 +3135,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3081,7 +3150,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3111,12 +3180,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3125,7 +3195,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3155,12 +3225,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3169,7 +3240,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3199,12 +3270,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3213,7 +3285,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3243,12 +3315,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3257,7 +3330,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3288,12 +3361,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3302,7 +3376,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3333,12 +3407,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3347,7 +3422,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3378,12 +3453,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3392,7 +3468,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3423,12 +3499,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3437,7 +3514,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3468,12 +3545,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3482,7 +3560,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3513,12 +3591,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3527,7 +3606,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3558,12 +3637,13 @@ 
*, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3572,7 +3652,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3603,12 +3683,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3617,7 +3698,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3648,12 +3729,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3662,7 +3744,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3692,12 +3774,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3706,7 +3789,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3736,12 +3819,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3750,7 +3834,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3780,12 +3864,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3794,7 +3879,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3824,12 +3909,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3838,7 +3924,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3868,12 +3954,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3882,7 +3969,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3912,12 +3999,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3926,7 +4014,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -3957,12 +4045,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3971,7 +4060,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4002,12 +4091,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4016,7 +4106,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4047,12 +4137,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4061,7 +4152,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4092,12 +4183,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4106,7 +4198,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4137,12 +4229,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4151,7 +4244,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4182,12 +4275,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4196,7 +4290,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4227,12 +4321,13 @@ *, , , + i32, i32); define 
@intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4241,7 +4336,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4272,12 +4367,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4286,7 +4382,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4317,12 +4413,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4331,7 +4428,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4361,12 +4458,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4375,7 +4473,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4405,12 +4503,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4419,7 +4518,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4449,12 +4548,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4463,7 +4563,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4493,12 +4593,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4507,7 +4608,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4537,12 +4638,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, 
(a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4551,7 +4653,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4581,12 +4683,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4595,7 +4698,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4625,12 +4728,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4639,7 +4743,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4670,12 +4774,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4684,7 +4789,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4715,12 +4820,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4729,7 +4835,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4760,12 +4866,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4774,7 +4881,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4805,12 +4912,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4819,7 +4927,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4850,12 +4958,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4864,7 +4973,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4895,12 +5004,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4909,7 +5019,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4940,12 +5050,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4954,7 +5065,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -4985,12 +5096,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4999,7 +5111,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5030,12 +5142,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5044,7 +5157,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5075,12 +5188,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5089,7 +5203,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5120,12 +5234,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5134,7 +5249,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5165,12 +5280,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5179,7 +5295,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5210,12 +5326,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5224,7 +5341,7 @@ * %1, %2, %3, - i32 
%4) + i32 %4, i32 1) ret %a } @@ -5255,12 +5372,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5269,7 +5387,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5300,12 +5418,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5314,7 +5433,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5345,12 +5464,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5359,7 +5479,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5390,12 +5510,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5404,7 +5525,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5435,12 +5556,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5449,7 +5571,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5480,12 +5602,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5494,7 +5617,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5525,12 +5648,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5539,7 +5663,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5570,12 +5694,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5584,7 +5709,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5615,12 +5740,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5629,7 +5755,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5660,12 +5786,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5674,7 +5801,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5705,12 +5832,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5719,7 +5847,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5750,12 +5878,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5764,7 +5893,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5795,12 +5924,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5809,7 +5939,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5840,12 +5970,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5854,7 +5985,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5885,12 +6016,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5899,7 +6031,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5930,12 +6062,13 @@ *, , , + i32, i32); define 
@intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5944,7 +6077,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -5975,12 +6108,13 @@ *, , , + i32, i32); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5989,7 +6123,7 @@ * %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -27,12 +27,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -566,12 +578,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -580,7 +593,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, 
i64 1) ret %a } @@ -610,12 +623,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -624,7 +638,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -654,12 +668,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -668,7 +683,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -698,12 +713,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -712,7 +728,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -743,12 +759,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -757,7 +774,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -788,12 +805,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -802,7 +820,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -833,12 +851,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -847,7 +866,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -878,12 +897,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -892,7 +912,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -923,12 +943,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -937,7 +958,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -968,12 +989,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -982,7 +1004,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1013,12 +1035,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1027,7 +1050,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1058,12 +1081,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1072,7 +1096,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1102,12 +1126,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1116,7 +1141,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1146,12 +1171,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1160,7 +1186,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1190,12 +1216,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1204,7 +1231,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1234,12 +1261,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1276,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1279,12 +1307,13 @@ *, , , + i64, i64); define 
@intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1293,7 +1322,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1324,12 +1353,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1338,7 +1368,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1369,12 +1399,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1383,7 +1414,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1414,12 +1445,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1428,7 +1460,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1459,12 +1491,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1473,7 +1506,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1504,12 +1537,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1518,7 +1552,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1549,12 +1583,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1563,7 +1598,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1594,12 +1629,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1644,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1639,12 +1675,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1653,7 +1690,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1684,12 +1721,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1698,7 +1736,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1728,12 +1766,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1742,7 +1781,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1772,12 +1811,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1826,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1816,12 +1856,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1830,7 +1871,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1860,12 +1901,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1874,7 +1916,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1904,12 +1946,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1918,7 +1961,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1949,12 +1992,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1963,7 +2007,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1994,12 +2038,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2008,7 +2053,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2039,12 +2084,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2053,7 +2099,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2084,12 +2130,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2098,7 +2145,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2129,12 +2176,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2143,7 +2191,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2174,12 +2222,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2188,7 +2237,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2219,12 +2268,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2233,7 +2283,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2264,12 +2314,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: 
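For reference, a complete masked vluxei test case after this change takes roughly the following shape (a sketch reconstructed from the surrounding hunks: the <vscale x ...> operand types follow from the test name, the intrinsic-name mangling is assumed and may differ in detail, and the trailing immediate is the newly added operand, which these tests pass as 1 and pair with a tail-agnostic `ta, mu` vsetvli check):

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)
  ret <vscale x 8 x half> %a
}

The same two-hunk pattern (one extra i32/i64 in the declaration and in the call, and tu -> ta in the vsetvli check) repeats for every data-type/index-type combination in both test files.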
@@ -2278,7 +2329,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2309,12 +2360,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2323,7 +2375,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2353,12 +2405,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2367,7 +2420,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2397,12 +2450,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2411,7 +2465,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2441,12 +2495,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2455,7 +2510,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2485,12 +2540,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2499,7 +2555,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2529,12 +2585,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2543,7 +2600,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2574,12 +2631,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2588,7 +2646,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2619,12 +2677,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2633,7 +2692,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2664,12 +2723,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2678,7 +2738,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2709,12 +2769,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2723,7 +2784,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2754,12 +2815,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2768,7 +2830,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2799,12 +2861,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2813,7 +2876,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2844,12 +2907,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2858,7 +2922,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2889,12 +2953,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2903,7 +2968,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2934,12 +2999,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2948,7 +3014,7 @@ * %1, %2, %3, - i64 %4) + 
i64 %4, i64 1) ret %a } @@ -2979,12 +3045,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2993,7 +3060,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3023,12 +3090,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3037,7 +3105,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3067,12 +3135,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3081,7 +3150,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3111,12 +3180,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3125,7 +3195,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3155,12 +3225,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3169,7 +3240,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3199,12 +3270,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3213,7 +3285,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3243,12 +3315,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3257,7 +3330,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3288,12 +3361,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3302,7 +3376,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3333,12 +3407,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3347,7 +3422,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3378,12 +3453,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3392,7 +3468,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3423,12 +3499,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3437,7 +3514,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3468,12 +3545,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3482,7 +3560,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3513,12 +3591,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3527,7 +3606,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3558,12 +3637,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3572,7 +3652,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3603,12 +3683,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3617,7 +3698,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3648,12 +3729,13 @@ *, , 
, + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3662,7 +3744,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3692,12 +3774,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3706,7 +3789,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3736,12 +3819,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3750,7 +3834,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3780,12 +3864,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3794,7 +3879,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3824,12 +3909,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3838,7 +3924,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3868,12 +3954,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3882,7 +3969,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3912,12 +3999,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3926,7 +4014,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3957,12 +4045,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3971,7 +4060,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4002,12 +4091,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4016,7 +4106,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4047,12 +4137,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4061,7 +4152,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4092,12 +4183,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4106,7 +4198,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4137,12 +4229,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4151,7 +4244,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4182,12 +4275,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4196,7 +4290,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4227,12 +4321,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4241,7 +4336,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4272,12 +4367,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4286,7 +4382,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4317,12 +4413,13 @@ *, , , + i64, i64); define 
@intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4331,7 +4428,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4361,12 +4458,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4375,7 +4473,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4405,12 +4503,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4419,7 +4518,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4449,12 +4548,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4463,7 +4563,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4493,12 +4593,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4507,7 +4608,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4537,12 +4638,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4551,7 +4653,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4581,12 +4683,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4595,7 +4698,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4625,12 +4728,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), 
v16, v0.t ; CHECK-NEXT: ret entry: @@ -4639,7 +4743,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4670,12 +4774,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4684,7 +4789,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4715,12 +4820,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4729,7 +4835,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4760,12 +4866,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4774,7 +4881,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4805,12 +4912,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4819,7 +4927,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4850,12 +4958,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4864,7 +4973,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4895,12 +5004,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4909,7 +5019,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4940,12 +5050,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4954,7 +5065,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -4985,12 +5096,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4999,7 +5111,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5030,12 +5142,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5044,7 +5157,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5075,12 +5188,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5089,7 +5203,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5120,12 +5234,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5134,7 +5249,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5165,12 +5280,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5179,7 +5295,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5210,12 +5326,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5224,7 +5341,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5255,12 +5372,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5269,7 +5387,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5300,12 +5418,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5314,7 +5433,7 @@ * %1, %2, %3, - i64 %4) 
+ i64 %4, i64 1) ret %a } @@ -5345,12 +5464,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5359,7 +5479,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5390,12 +5510,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5404,7 +5525,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5435,12 +5556,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5449,7 +5571,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5480,12 +5602,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5494,7 +5617,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5525,12 +5648,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5539,7 +5663,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5570,12 +5694,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5584,7 +5709,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5615,12 +5740,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5629,7 +5755,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5660,12 +5786,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5674,7 +5801,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5705,12 +5832,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5719,7 +5847,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5750,12 +5878,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5764,7 +5893,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5795,12 +5924,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5809,7 +5939,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5840,12 +5970,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5854,7 +5985,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5885,12 +6016,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5899,7 +6031,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5930,12 +6062,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -5944,7 +6077,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -5975,12 +6108,13 @@ *, , , + i64, i64); define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5989,7 +6123,7 @@ * %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv16i16_nxv16i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: @@ -22,18 +22,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv16i16_nxv16i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: @@ -52,18 +52,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv16i16_nxv16i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: @@ -82,18 +82,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: @@ -112,18 +112,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: @@ -142,18 +142,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: @@ -172,18 +172,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: @@ -204,18 +204,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv1i8_nxv1i32(i8* %base, %index, i32 
%vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: @@ -236,18 +236,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: @@ -268,18 +268,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: @@ -301,18 +301,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: @@ -334,18 +334,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32) +declare 
{,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: @@ -367,18 +367,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: @@ -401,18 +401,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: @@ -435,18 +435,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: @@ -469,18 +469,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, 
i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: @@ -504,18 +504,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: @@ -539,18 +539,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: @@ -574,18 +574,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: @@ -610,18 +610,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: 
vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: @@ -646,18 +646,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: @@ -682,18 +682,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: @@ -719,18 +719,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, 
i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: @@ -756,18 +756,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: @@ -793,18 +793,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: @@ -823,18 +823,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: @@ -853,18 +853,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 
= tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: @@ -883,18 +883,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: @@ -914,18 +914,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: @@ -946,18 +946,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: @@ -977,18 +977,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; 
CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: @@ -1010,18 +1010,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: @@ -1043,18 +1043,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: @@ -1075,18 +1075,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32) 
+declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: @@ -1105,18 +1105,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: @@ -1135,18 +1135,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: @@ -1165,18 +1165,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: @@ -1197,18 +1197,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, 
%val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: @@ -1229,18 +1229,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: @@ -1261,18 +1261,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32: @@ -1294,18 +1294,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: @@ -1327,18 +1327,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: @@ -1360,18 +1360,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: @@ -1394,18 +1394,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: @@ -1428,18 +1428,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16: @@ -1462,18 +1462,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; 
CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: @@ -1497,18 +1497,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: @@ -1532,18 +1532,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: @@ -1567,18 +1567,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} 
@llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: @@ -1603,18 +1603,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: @@ -1639,18 +1639,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: @@ -1675,18 +1675,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: @@ -1712,18 +1712,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: @@ -1749,18 +1749,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: @@ -1786,18 +1786,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: @@ -1816,18 +1816,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16*, , i32) 
-declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: @@ -1846,18 +1846,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: @@ -1876,18 +1876,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: @@ -1908,18 +1908,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: @@ -1940,18 +1940,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32: @@ -1971,18 +1971,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: @@ -2004,18 +2004,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: @@ -2037,18 +2037,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: @@ -2070,18 +2070,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: @@ -2104,18 +2104,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: @@ -2138,18 +2138,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: @@ -2172,18 +2172,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32, i32) define 
@test_vluxseg6_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: @@ -2207,18 +2207,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: @@ -2242,18 +2242,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: @@ -2277,18 +2277,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: @@ -2313,18 +2313,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: @@ -2349,18 +2349,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: @@ -2385,18 +2385,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: @@ -2422,18 +2422,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv4i16_nxv4i8: @@ -2459,18 +2459,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: @@ -2496,18 +2496,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8: @@ -2526,18 +2526,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32: @@ -2556,18 +2556,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, 
%val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16: @@ -2586,18 +2586,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8: @@ -2618,18 +2618,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32: @@ -2650,18 +2650,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16: @@ -2682,18 +2682,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8: @@ -2715,18 +2715,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32: @@ -2748,18 +2748,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16: @@ -2781,18 +2781,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8: @@ -2815,18 +2815,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32: @@ -2849,18 +2849,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16: @@ -2883,18 +2883,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8: @@ -2918,18 +2918,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32: @@ -2953,18 +2953,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16: @@ -2988,18 +2988,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8: @@ -3024,18 +3024,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32: @@ -3060,18 +3060,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16: @@ -3096,18 +3096,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8: @@ -3133,18 +3133,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32: @@ -3170,18 +3170,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32*, , i32) -declare {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16: @@ -3207,18 +3207,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16: @@ -3237,18 +3237,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8: @@ -3267,18 +3267,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32: @@ -3297,18 +3297,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16: @@ -3329,18 +3329,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8: @@ -3361,18 +3361,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32: @@ -3392,18 +3392,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16: @@ -3425,18 +3425,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8: @@ -3458,18 +3458,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32: @@ -3491,18 +3491,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: @@ -3521,18 +3521,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i32, i32) define 
@test_vluxseg2_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: @@ -3551,18 +3551,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: @@ -3581,18 +3581,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: @@ -3612,18 +3612,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: @@ -3644,18 +3644,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8*, , 
i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: @@ -3675,18 +3675,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: @@ -3708,18 +3708,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: @@ -3741,18 +3741,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: @@ -3773,18 +3773,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call 
{,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: @@ -3807,18 +3807,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: @@ -3841,18 +3841,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: @@ -3874,18 +3874,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: @@ -3909,18 +3909,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: @@ -3944,18 +3944,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: @@ -3979,18 +3979,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: @@ -4015,18 +4015,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: @@ -4051,18 +4051,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: @@ -4087,18 +4087,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: @@ -4124,18 +4124,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: @@ -4161,18 +4161,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: @@ -4198,18 +4198,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: @@ -4228,18 +4228,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv8i32_nxv8i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: @@ -4258,18 +4258,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv8i32_nxv8i32(i32* %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: @@ -4288,18 +4288,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: @@ -4318,18 +4318,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: @@ -4348,18 +4348,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: @@ -4378,18 +4378,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8*, , i32) 
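Only the ".mask." variants are touched by this change; plain declarations such as the vluxseg3.nxv4i8.nxv4i16 one immediately above keep their (pointer, index, vl) shape. For contrast, here is a reconstructed side-by-side for that pair, again with the elided scalable vector types filled in from the mangled names (an inference, not verbatim patch text):

; Unmasked form: untouched by this patch.
declare {<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>}
  @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)

; Masked form after this patch: merge operands, pointer, index, mask, vl, then the new policy.
declare {<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>}
  @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>,
    i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32,
    i32)                                                          ; new policy immediate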
-declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: @@ -4410,18 +4410,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: @@ -4442,18 +4442,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: @@ -4473,18 +4473,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16: @@ -4506,18 +4506,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, 
i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: @@ -4539,18 +4539,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: @@ -4572,18 +4572,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16: @@ -4606,18 +4606,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8: @@ -4640,18 +4640,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32: @@ -4674,18 +4674,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16: @@ -4709,18 +4709,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8: @@ -4744,18 +4744,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32: @@ -4779,18 +4779,18 @@ ; 
CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16: @@ -4815,18 +4815,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8: @@ -4851,18 +4851,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32: @@ -4887,18 +4887,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = 
extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16: @@ -4924,18 +4924,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8: @@ -4961,18 +4961,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32: @@ -4998,18 +4998,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8: @@ -5028,18 +5028,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32: @@ -5058,18 +5058,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16: @@ -5088,18 +5088,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: @@ -5120,18 +5120,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32, i32) define 
@test_vluxseg3_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: @@ -5152,18 +5152,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: @@ -5184,18 +5184,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: @@ -5217,18 +5217,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: @@ -5250,18 +5250,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} 
@llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: @@ -5283,18 +5283,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: @@ -5317,18 +5317,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: @@ -5351,18 +5351,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: @@ -5385,18 +5385,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: @@ -5420,18 +5420,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: @@ -5455,18 +5455,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: @@ -5490,18 +5490,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: @@ -5526,18 +5526,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: @@ -5562,18 +5562,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: @@ -5598,18 +5598,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8: @@ -5635,18 +5635,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail 
call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: @@ -5672,18 +5672,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: @@ -5709,18 +5709,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: @@ -5739,18 +5739,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: @@ 
-5769,18 +5769,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: @@ -5799,18 +5799,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: @@ -5829,18 +5829,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: @@ -5859,18 +5859,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, 
i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: @@ -5891,18 +5891,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: @@ -5923,18 +5923,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: @@ -5955,18 +5955,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: @@ -5988,18 +5988,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } 
declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: @@ -6021,18 +6021,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: @@ -6054,18 +6054,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: @@ -6088,18 +6088,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8: @@ -6122,18 +6122,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16: @@ -6156,18 +6156,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32: @@ -6191,18 +6191,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8: @@ -6226,18 +6226,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16: @@ -6261,18 +6261,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: @@ -6297,18 +6297,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: @@ -6333,18 +6333,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: @@ -6369,18 +6369,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8*, 
, i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: @@ -6406,18 +6406,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: @@ -6443,18 +6443,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: @@ -6480,18 +6480,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: @@ -6510,18 +6510,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: @@ -6540,18 +6540,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: @@ -6570,18 +6570,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: @@ -6602,18 +6602,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i16_nxv2i8: @@ -6634,18 +6634,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: @@ -6666,18 +6666,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: @@ -6699,18 +6699,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: @@ -6732,18 +6732,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: @@ -6765,18 +6765,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: @@ -6799,18 +6799,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: @@ -6833,18 +6833,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: @@ -6867,18 +6867,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, 
i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: @@ -6902,18 +6902,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: @@ -6937,18 +6937,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: @@ -6972,18 +6972,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: @@ -7008,18 +7008,18 @@ ; 
CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: @@ -7044,18 +7044,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: @@ -7080,18 +7080,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: @@ -7117,18 +7117,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, 
%val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: @@ -7154,18 +7154,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: @@ -7191,18 +7191,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: @@ -7221,18 +7221,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: @@ -7251,18 +7251,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8: ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: @@ -7281,18 +7281,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: @@ -7313,18 +7313,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: @@ -7345,18 +7345,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32) +declare {,,} 
@llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: @@ -7377,18 +7377,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: @@ -7410,18 +7410,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: @@ -7443,18 +7443,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: @@ -7476,18 +7476,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, 
i32* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv16f16_nxv16i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: @@ -7506,18 +7506,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv16f16_nxv16i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: @@ -7536,18 +7536,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv16f16_nxv16i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: @@ -7566,18 +7566,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32, i32) define @test_vluxseg2_nxv4f64_nxv4i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: @@ -7596,18 +7596,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; 
CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT:    ret
entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
  ret <vscale x 4 x double> %1
}

declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)

define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8:
@@ -7626,18 +7626,18 @@
; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v4, v8
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT:    ret
entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
  ret <vscale x 4 x double> %1
}

declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)

define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32:
@@ -7656,18 +7656,18 @@
; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v4, v8
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT:    ret
entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
  ret <vscale x 4 x double> %1
}

declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8:
@@ -7686,18 +7686,18 @@
; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT:    ret
entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}

declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>}
@llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i32, i32) define @test_vluxseg2_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: @@ -7716,18 +7716,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i32, i32) define @test_vluxseg2_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: @@ -7746,18 +7746,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i32, i32) define @test_vluxseg3_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: @@ -7778,18 +7778,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32, i32) define @test_vluxseg3_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: @@ -7810,18 +7810,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32, i32) define @test_vluxseg3_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: @@ -7842,18 +7842,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8: @@ -7875,18 +7875,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: @@ -7908,18 +7908,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: @@ -7941,18 +7941,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32, i32) define @test_vluxseg5_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: @@ -7975,18 +7975,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32, i32) define @test_vluxseg5_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: @@ -8009,18 +8009,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32, i32) define @test_vluxseg5_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: @@ -8043,18 +8043,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32) +declare {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32, i32) define @test_vluxseg6_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: @@ -8078,18 +8078,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32, i32) define @test_vluxseg6_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: @@ -8113,18 +8113,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32, i32) define @test_vluxseg6_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: @@ -8148,18 +8148,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32, i32) define @test_vluxseg7_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: @@ -8184,18 +8184,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32, i32) define @test_vluxseg7_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: @@ -8220,18 +8220,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32, i32) define @test_vluxseg7_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: @@ -8256,18 +8256,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32, i32) define @test_vluxseg8_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: @@ -8293,18 +8293,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double*, , i32) -declare {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32, i32) define @test_vluxseg8_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: @@ -8330,18 +8330,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32, i32) define @test_vluxseg8_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: @@ -8367,18 +8367,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: @@ -8397,18 +8397,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: @@ -8427,18 +8427,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: 
vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: @@ -8457,18 +8457,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32, i32) define @test_vluxseg3_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: @@ -8489,18 +8489,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32, i32) define @test_vluxseg3_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: @@ -8521,18 +8521,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32, i32) define @test_vluxseg3_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2f32_nxv2i16: @@ -8553,18 +8553,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: @@ -8586,18 +8586,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: @@ -8619,18 +8619,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: @@ -8652,18 +8652,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} 
@llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: @@ -8686,18 +8686,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: @@ -8720,18 +8720,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: @@ -8754,18 +8754,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: @@ -8789,18 +8789,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, 
v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: @@ -8824,18 +8824,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: @@ -8859,18 +8859,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: @@ -8895,18 +8895,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32) +declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: @@ -8931,18 +8931,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: @@ -8967,18 +8967,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: @@ -9004,18 +9004,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: @@ -9041,18 +9041,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v 
v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: @@ -9078,18 +9078,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: @@ -9108,18 +9108,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: @@ -9138,18 +9138,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, half*, 
, , i32, i32) define @test_vluxseg2_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: @@ -9168,18 +9168,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: @@ -9200,18 +9200,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: @@ -9232,18 +9232,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: @@ -9264,18 +9264,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 
} declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: @@ -9297,18 +9297,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: @@ -9330,18 +9330,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: @@ -9363,18 +9363,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: @@ -9397,18 +9397,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: @@ -9431,18 +9431,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: @@ -9465,18 +9465,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: @@ -9500,18 +9500,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1f16_nxv1i32: @@ -9535,18 +9535,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: @@ -9570,18 +9570,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: @@ -9606,18 +9606,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: @@ -9642,18 +9642,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: @@ -9678,18 +9678,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: @@ -9715,18 +9715,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: @@ -9752,18 +9752,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv1f16_nxv1i16(half* 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: @@ -9789,18 +9789,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: @@ -9819,18 +9819,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: @@ -9849,18 +9849,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: @@ -9879,18 +9879,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( 
%val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32, i32) define @test_vluxseg3_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8: @@ -9911,18 +9911,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32, i32) define @test_vluxseg3_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: @@ -9943,18 +9943,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32, i32) define @test_vluxseg3_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: @@ -9975,18 +9975,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: @@ -10008,18 +10008,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: @@ -10041,18 +10041,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: @@ -10074,18 +10074,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8: @@ -10108,18 +10108,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1f32_nxv1i32: @@ -10142,18 +10142,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16: @@ -10176,18 +10176,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8: @@ -10211,18 +10211,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32: @@ -10246,18 +10246,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, 
%val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16: @@ -10281,18 +10281,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8: @@ -10317,18 +10317,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32: @@ -10353,18 +10353,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16: @@ -10389,18 +10389,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; 
CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8: @@ -10426,18 +10426,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32: @@ -10463,18 +10463,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16: @@ -10500,18 +10500,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = 
tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16: @@ -10530,18 +10530,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8: @@ -10560,18 +10560,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32: @@ -10590,18 +10590,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16: @@ -10622,18 +10622,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8: @@ -10654,18 +10654,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32: @@ -10685,18 +10685,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16: @@ -10718,18 +10718,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, 
half*, , , i32, i32) define @test_vluxseg4_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8: @@ -10751,18 +10751,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32: @@ -10784,18 +10784,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv8f32_nxv8i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16: @@ -10814,18 +10814,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv8f32_nxv8i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8: @@ -10844,18 +10844,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv8f32_nxv8i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32: @@ -10874,18 +10874,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32, i32) define @test_vluxseg2_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32: @@ -10904,18 +10904,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32, i32) define @test_vluxseg2_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8: @@ -10934,18 +10934,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32, i32) define @test_vluxseg2_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16: @@ -10964,18 +10964,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32, i32) define @test_vluxseg3_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32: @@ -10996,18 +10996,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32, i32) define @test_vluxseg3_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8: @@ -11028,18 +11028,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32, i32) define @test_vluxseg3_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16: @@ -11060,18 +11060,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , 
i32, i32) define @test_vluxseg4_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32: @@ -11093,18 +11093,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8: @@ -11126,18 +11126,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16: @@ -11159,18 +11159,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16: @@ -11189,18 +11189,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8: @@ -11219,18 +11219,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32: @@ -11249,18 +11249,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16: @@ -11281,18 +11281,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8: @@ -11313,18 +11313,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32: @@ -11344,18 +11344,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16: @@ -11377,18 +11377,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8: @@ -11410,18 +11410,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4f16_nxv4i32: @@ -11443,18 +11443,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16: @@ -11477,18 +11477,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8: @@ -11511,18 +11511,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32: @@ -11545,18 +11545,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 
ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16: @@ -11580,18 +11580,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8: @@ -11615,18 +11615,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32: @@ -11650,18 +11650,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16: @@ -11686,18 +11686,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu 
; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8: @@ -11722,18 +11722,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32: @@ -11758,18 +11758,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16: @@ -11795,18 +11795,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8: @@ -11832,18 +11832,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32: @@ -11869,18 +11869,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32: @@ -11899,18 +11899,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8: @@ -11929,18 +11929,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16: @@ -11959,18 +11959,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32: @@ -11991,18 +11991,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8: @@ -12023,18 +12023,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16: @@ -12055,18 +12055,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32: @@ -12088,18 +12088,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8: @@ -12121,18 +12121,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16: @@ -12154,18 +12154,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} 
@llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32: @@ -12188,18 +12188,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8: @@ -12222,18 +12222,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16: @@ -12256,18 +12256,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32: @@ -12291,18 +12291,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8: @@ -12326,18 +12326,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: @@ -12361,18 +12361,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: @@ -12397,18 +12397,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32) +declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: @@ -12433,18 +12433,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: @@ -12469,18 +12469,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: @@ -12506,18 +12506,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: @@ -12543,18 +12543,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, 
v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16: @@ -12580,18 +12580,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: @@ -12610,18 +12610,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: @@ -12640,18 +12640,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float*, , i32) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32) +declare {,} 
@llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: @@ -12670,18 +12670,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32, i32) define @test_vluxseg3_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16: @@ -12702,18 +12702,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32, i32) define @test_vluxseg3_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8: @@ -12734,18 +12734,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float*, , i32) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32, i32) define @test_vluxseg3_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: @@ -12766,18 +12766,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, 
%val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16: @@ -12799,18 +12799,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8: @@ -12832,18 +12832,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float*, , i32) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32: @@ -12865,12 +12865,12 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) %1 = extractvalue {,,,} %0, 1 ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -3,7 +3,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: @@ -22,18 +22,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: @@ -52,18 +52,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: @@ -82,18 +82,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: @@ -112,18 +112,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = 
extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: @@ -142,18 +142,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i64: @@ -172,18 +172,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: @@ -202,18 +202,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: @@ -234,18 +234,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: @@ -266,18 +266,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i64: @@ -297,18 +297,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: @@ -329,18 +329,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: @@ -362,18 +362,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: @@ -395,18 +395,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i64: @@ -428,18 +428,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: @@ -461,18 +461,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64, i64) 
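The declarations and calls in these test hunks render with their scalable-vector types elided; the types are recoverable from the intrinsic name mangling (e.g. `.nxv16i8.nxv16i16`). As a hedged sketch for readability only, not additional patch content, the masked declaration directly above and the corresponding updated call would read fully typed roughly as below. The new trailing i64 operand is the policy immediate, and the value 1 used throughout these tests selects tail agnostic, which is what the tu -> ta change in the vsetvli CHECK lines reflects.

; Sketch with types inferred from the .nxv16i8.nxv16i16 mangling; this is an
; editorial assumption, not text copied from the original patch.
declare {<vscale x 16 x i8>, <vscale x 16 x i8>}
  @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8>, <vscale x 16 x i8>, i8*, <vscale x 16 x i16>,
    <vscale x 16 x i1>, i64, i64)

; Masked call shape after this change: the added i64 1 is the tail policy
; (1 = tail agnostic, matching the "ta" in the expected vsetvli).
%0 = tail call {<vscale x 16 x i8>, <vscale x 16 x i8>}
  @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base,
    <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)

The rv32 tests earlier in the diff follow the same pattern with i32 in place of i64 for both the vl and policy operands.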
define @test_vluxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: @@ -491,18 +491,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: @@ -521,18 +521,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: @@ -551,18 +551,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: @@ -582,18 +582,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, 
%index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: @@ -614,18 +614,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: @@ -645,18 +645,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: @@ -678,18 +678,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: @@ -711,18 +711,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: @@ -743,18 +743,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i64: @@ -773,18 +773,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i32: @@ -803,18 +803,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i16: @@ -833,18 +833,18 @@ ; CHECK-LABEL: 
test_vluxseg2_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i8: @@ -863,18 +863,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i64: @@ -895,18 +895,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i32: @@ -927,18 +927,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64) +declare {,,} 
@llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i16: @@ -959,18 +959,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i8: @@ -991,18 +991,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i64: @@ -1024,18 +1024,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i32: @@ -1057,18 +1057,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = 
extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i16: @@ -1090,18 +1090,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i8: @@ -1123,18 +1123,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i64: @@ -1157,18 +1157,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i32: @@ -1191,18 +1191,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i16: @@ -1225,18 +1225,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i8: @@ -1259,18 +1259,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i64: @@ -1294,18 +1294,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i64_nxv1i32: @@ -1329,18 +1329,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i16: @@ -1364,18 +1364,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i8: @@ -1399,18 +1399,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i64: @@ -1435,18 +1435,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, 
%val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i32: @@ -1471,18 +1471,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i16: @@ -1507,18 +1507,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i8: @@ -1543,18 +1543,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i64: @@ -1580,18 +1580,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; 
CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i32: @@ -1617,18 +1617,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i16: @@ -1654,18 +1654,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i8: @@ -1691,18 +1691,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8( 
%val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i64: @@ -1721,18 +1721,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32: @@ -1751,18 +1751,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16: @@ -1781,18 +1781,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8: @@ -1811,18 +1811,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; 
CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i64: @@ -1843,18 +1843,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32: @@ -1875,18 +1875,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16: @@ -1907,18 +1907,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8: @@ -1939,18 +1939,18 @@ ; CHECK-NEXT: vmv1r.v 
v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i64: @@ -1972,18 +1972,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32: @@ -2005,18 +2005,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16: @@ -2038,18 +2038,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64) +declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8: @@ -2071,18 +2071,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i64: @@ -2105,18 +2105,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32: @@ -2139,18 +2139,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16: @@ -2173,18 +2173,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8: @@ -2207,18 +2207,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i64: @@ -2242,18 +2242,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32: @@ -2277,18 +2277,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16: @@ -2312,18 +2312,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8: @@ -2347,18 +2347,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i64: @@ -2383,18 +2383,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32: @@ -2419,18 +2419,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} 
@llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16: @@ -2455,18 +2455,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8: @@ -2491,18 +2491,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i64: @@ -2528,18 +2528,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32: @@ -2565,18 +2565,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16: @@ -2602,18 +2602,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8: @@ -2639,18 +2639,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16: @@ -2669,18 +2669,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} 
@llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8: @@ -2699,18 +2699,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i64: @@ -2729,18 +2729,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32: @@ -2759,18 +2759,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16: @@ -2791,18 +2791,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8: @@ -2823,18 +2823,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i64: @@ -2854,18 +2854,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32: @@ -2885,18 +2885,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16: @@ -2918,18 +2918,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: 
vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8: @@ -2951,18 +2951,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i64: @@ -2983,18 +2983,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32: @@ -3016,18 +3016,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64) 
+declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: @@ -3046,18 +3046,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: @@ -3076,18 +3076,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i64: @@ -3106,18 +3106,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: @@ -3136,18 +3136,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, 
%index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: @@ -3167,18 +3167,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: @@ -3199,18 +3199,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i64: @@ -3230,18 +3230,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: @@ -3262,18 +3262,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: @@ -3295,18 +3295,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: @@ -3328,18 +3328,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i64: @@ -3360,18 +3360,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16: @@ -3393,18 +3393,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32: @@ -3427,18 +3427,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8: @@ -3461,18 +3461,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i64: @@ -3494,18 +3494,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64) +declare {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16: @@ -3528,18 +3528,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32: @@ -3563,18 +3563,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8: @@ -3598,18 +3598,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i64: @@ -3633,18 +3633,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16: @@ -3668,18 +3668,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32: @@ -3704,18 +3704,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8: @@ -3740,18 +3740,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i64: @@ -3776,18 +3776,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; 
CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16: @@ -3812,18 +3812,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32: @@ -3849,18 +3849,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8: @@ -3886,18 +3886,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) 
%1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i64: @@ -3923,18 +3923,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16: @@ -3960,18 +3960,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i64: @@ -3990,18 +3990,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32: @@ -4020,18 +4020,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16: @@ -4050,18 +4050,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8: @@ -4080,18 +4080,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i64: @@ -4112,18 +4112,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: @@ -4144,18 +4144,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: @@ -4176,18 +4176,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: @@ -4208,18 +4208,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i64: @@ -4241,18 +4241,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: @@ -4274,18 +4274,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: @@ -4307,18 +4307,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: @@ -4340,18 +4340,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i64: @@ -4374,18 +4374,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 
= tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: @@ -4408,18 +4408,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: @@ -4442,18 +4442,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: @@ -4476,18 +4476,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i64: @@ -4511,18 +4511,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: @@ -4546,18 +4546,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: @@ -4581,18 +4581,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: @@ -4616,18 +4616,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(i16*, , i64) -declare 
{,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i64: @@ -4652,18 +4652,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: @@ -4688,18 +4688,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: @@ -4724,18 +4724,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: @@ -4760,18 +4760,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, 
v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i64: @@ -4797,18 +4797,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: @@ -4834,18 +4834,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: @@ -4871,18 +4871,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8: @@ -4908,18 +4908,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: @@ -4938,18 +4938,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: @@ -4968,18 +4968,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: @@ -4998,18 +4998,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i64: @@ -5028,18 +5028,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: @@ -5060,18 +5060,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: @@ -5092,18 +5092,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: @@ -5124,18 +5124,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(i32*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i64: @@ -5155,18 +5155,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32: @@ -5188,18 +5188,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: @@ -5221,18 +5221,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64, i64) define 
@test_vluxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: @@ -5254,18 +5254,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i64: @@ -5287,18 +5287,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: @@ -5321,18 +5321,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: @@ -5355,18 +5355,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 
= extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16: @@ -5389,18 +5389,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i64: @@ -5423,18 +5423,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: @@ -5458,18 +5458,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: @@ -5493,18 +5493,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, 
v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: @@ -5528,18 +5528,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i64: @@ -5563,18 +5563,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: @@ -5599,18 +5599,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64) +declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: @@ -5635,18 +5635,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: @@ -5671,18 +5671,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i64: @@ -5707,18 +5707,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: @@ -5744,18 +5744,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: @@ -5781,18 +5781,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: @@ -5818,18 +5818,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i64: @@ -5855,18 +5855,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , 
i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: @@ -5885,18 +5885,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: @@ -5915,18 +5915,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i64: @@ -5945,18 +5945,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: @@ -5975,18 +5975,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, 
%index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: @@ -6006,18 +6006,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: @@ -6038,18 +6038,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i64: @@ -6069,18 +6069,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: @@ -6100,18 +6100,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: @@ -6133,18 +6133,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: @@ -6166,18 +6166,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i64: @@ -6198,18 +6198,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: @@ -6230,18 +6230,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: @@ -6264,18 +6264,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: @@ -6298,18 +6298,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i64: @@ -6331,18 +6331,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64) +declare {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: @@ -6364,18 +6364,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: @@ -6399,18 +6399,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: @@ -6434,18 +6434,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i64: @@ -6468,18 +6468,18 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, 
%val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: @@ -6503,18 +6503,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: @@ -6539,18 +6539,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: @@ -6575,18 +6575,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i64: @@ -6610,18 +6610,18 
@@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: @@ -6646,18 +6646,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: @@ -6683,18 +6683,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: @@ -6720,18 +6720,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i64: @@ -6756,18 +6756,18 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vmv1r.v v14, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: @@ -6793,18 +6793,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i32: @@ -6823,18 +6823,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i8: @@ 
-6853,18 +6853,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i64: @@ -6883,18 +6883,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i16: @@ -6913,18 +6913,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: @@ -6943,18 +6943,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} 
@llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: @@ -6973,18 +6973,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i64: @@ -7003,18 +7003,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: @@ -7033,18 +7033,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32: @@ -7064,18 +7064,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: @@ -7096,18 +7096,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i64: @@ -7127,18 +7127,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: @@ -7159,18 +7159,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: @@ -7192,18 +7192,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: @@ -7225,18 +7225,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i64: @@ -7257,18 +7257,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: @@ -7290,18 +7290,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64, i64) 
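; Illustrative sketch only (not part of the generated checks): the fully-typed form of the
; masked vluxseg5 declaration above, with the scalable-vector element types implied by the
; nxv4i16.nxv4i32 suffix written out. The trailing i64 is the added policy operand; the
; calls below pass i64 1 (tail agnostic), which is why the expected vsetvli switches from
; "tu, mu" to "ta, mu".
;
;   declare {<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>,
;            <vscale x 4 x i16>, <vscale x 4 x i16>}
;     @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(
;       <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>,
;       <vscale x 4 x i16>, <vscale x 4 x i16>,
;       i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)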
define @test_vluxseg5_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: @@ -7324,18 +7324,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: @@ -7358,18 +7358,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i64: @@ -7391,18 +7391,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: @@ -7425,18 +7425,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: @@ -7460,18 +7460,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: @@ -7495,18 +7495,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i64: @@ -7530,18 +7530,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: @@ -7565,18 +7565,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: 
vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: @@ -7601,18 +7601,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: @@ -7637,18 +7637,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i64: @@ -7673,18 +7673,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret 
%1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: @@ -7709,18 +7709,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: @@ -7746,18 +7746,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8: @@ -7783,18 +7783,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i64: @@ -7820,18 +7820,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: @@ -7857,18 +7857,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i64: @@ -7887,18 +7887,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: @@ -7917,18 +7917,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8*, , i64) -declare {,} 
@llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: @@ -7947,18 +7947,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: @@ -7977,18 +7977,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i64: @@ -8009,18 +8009,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: @@ -8041,18 +8041,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, 
%index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: @@ -8073,18 +8073,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: @@ -8105,18 +8105,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i64: @@ -8138,18 +8138,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: @@ -8171,18 +8171,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, 
i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: @@ -8204,18 +8204,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: @@ -8237,18 +8237,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i64: @@ -8271,18 +8271,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: @@ -8305,18 +8305,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, 
ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: @@ -8339,18 +8339,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: @@ -8373,18 +8373,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i64: @@ -8408,18 +8408,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64, i64) define 
@test_vluxseg6_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: @@ -8443,18 +8443,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: @@ -8478,18 +8478,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: @@ -8513,18 +8513,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i64: @@ -8549,18 +8549,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64( %val, 
%val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: @@ -8585,18 +8585,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: @@ -8621,18 +8621,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: @@ -8657,18 +8657,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i64: @@ -8694,18 +8694,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v 
v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: @@ -8731,18 +8731,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: @@ -8768,18 +8768,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: @@ -8805,18 +8805,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, 
%index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: @@ -8835,18 +8835,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: @@ -8865,18 +8865,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: @@ -8895,18 +8895,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i64: @@ -8925,18 +8925,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: @@ -8957,18 +8957,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: @@ -8989,18 +8989,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: @@ -9021,18 +9021,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(i8*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i64: @@ -9052,18 +9052,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: 
vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: @@ -9085,18 +9085,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: @@ -9118,18 +9118,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: @@ -9151,18 +9151,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i64: @@ -9184,18 +9184,18 @@ ; 
CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: @@ -9218,18 +9218,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8: @@ -9252,18 +9252,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16: @@ -9286,18 +9286,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i64: @@ -9320,18 +9320,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32: @@ -9355,18 +9355,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8: @@ -9390,18 +9390,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16: @@ -9425,18 +9425,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, 
%val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i64: @@ -9460,18 +9460,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: @@ -9496,18 +9496,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: @@ -9532,18 +9532,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: @@ -9568,18 
+9568,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i64: @@ -9604,18 +9604,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: @@ -9641,18 +9641,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: @@ -9678,18 +9678,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, 
%val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: @@ -9715,18 +9715,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i64: @@ -9752,18 +9752,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: @@ -9782,18 +9782,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: @@ -9812,18 +9812,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i64: @@ -9842,18 +9842,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: @@ -9872,18 +9872,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: @@ -9902,18 +9902,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64) +declare 
{,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: @@ -9932,18 +9932,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: @@ -9962,18 +9962,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: @@ -9992,18 +9992,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: @@ -10022,18 +10022,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i64: @@ -10052,18 +10052,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: @@ -10084,18 +10084,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8: @@ -10116,18 +10116,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: @@ -10148,18 +10148,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: 
vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(i16*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i64: @@ -10179,18 +10179,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: @@ -10212,18 +10212,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: @@ -10245,18 +10245,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: @@ -10278,18 +10278,18 @@ ; CHECK-NEXT: 
vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i64: @@ -10311,18 +10311,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: @@ -10345,18 +10345,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: @@ -10379,18 +10379,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: @@ -10413,18 +10413,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i64: @@ -10447,18 +10447,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: @@ -10482,18 +10482,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: @@ -10517,18 +10517,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: @@ -10552,18 +10552,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i64: @@ -10587,18 +10587,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: @@ -10623,18 +10623,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64, i64) define 
@test_vluxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: @@ -10659,18 +10659,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: @@ -10695,18 +10695,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i64: @@ -10731,18 +10731,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: @@ -10768,18 +10768,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: @@ -10805,18 +10805,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: @@ -10842,18 +10842,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i64: @@ -10879,18 +10879,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64) +declare {,} 
@llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i32: @@ -10909,18 +10909,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i8: @@ -10939,18 +10939,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i16: @@ -10969,18 +10969,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(i64*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(,, i64*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i64: @@ -10999,18 +10999,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail 
call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i32: @@ -11031,18 +11031,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i8: @@ -11063,18 +11063,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i16: @@ -11095,18 +11095,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(i64*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i64: @@ -11127,18 +11127,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i32: @@ -11160,18 +11160,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i8: @@ -11193,18 +11193,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i16: @@ -11226,18 +11226,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(i64*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i64: @@ -11259,18 +11259,18 @@ ; CHECK-NEXT: 
vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: @@ -11289,18 +11289,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: @@ -11319,18 +11319,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: @@ -11349,18 +11349,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} 
@llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32: @@ -11379,18 +11379,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8: @@ -11409,18 +11409,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i64: @@ -11439,18 +11439,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: @@ -11469,18 +11469,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; 
CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i64: @@ -11499,18 +11499,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: @@ -11529,18 +11529,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: @@ -11559,18 +11559,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i8(double* %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8: @@ -11589,18 +11589,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i64: @@ -11621,18 +11621,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: @@ -11653,18 +11653,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: @@ -11685,18 +11685,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} 
@llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: @@ -11717,18 +11717,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i64: @@ -11750,18 +11750,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: @@ -11783,18 +11783,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: @@ -11816,18 +11816,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8: @@ -11849,18 +11849,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i64: @@ -11883,18 +11883,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: @@ -11917,18 +11917,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1f64_nxv1i16: @@ -11951,18 +11951,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: @@ -11985,18 +11985,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i64: @@ -12020,18 +12020,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: @@ -12055,18 +12055,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, 
%val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: @@ -12090,18 +12090,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: @@ -12125,18 +12125,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i64: @@ -12161,18 +12161,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: @@ -12197,18 +12197,18 @@ ; CHECK-NEXT: vmv1r.v v5, 
v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: @@ -12233,18 +12233,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: @@ -12269,18 +12269,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i64: @@ -12306,18 +12306,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: @@ -12343,18 +12343,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: @@ -12380,18 +12380,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: @@ -12417,18 +12417,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64, 
i64) define @test_vluxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: @@ -12447,18 +12447,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: @@ -12477,18 +12477,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: @@ -12507,18 +12507,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i64: @@ -12537,18 +12537,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64( 
%val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: @@ -12569,18 +12569,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: @@ -12601,18 +12601,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: @@ -12633,18 +12633,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i64: @@ -12664,18 +12664,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: @@ -12697,18 +12697,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: @@ -12730,18 +12730,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: @@ -12763,18 +12763,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i64: @@ 
-12796,18 +12796,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: @@ -12830,18 +12830,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: @@ -12864,18 +12864,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: @@ -12898,18 +12898,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } 
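For reference, a minimal sketch of the updated masked-intrinsic shape exercised by these tests, assuming the <vscale x 2 x float> data, <vscale x 2 x i16> index, and <vscale x 2 x i1> mask types implied by the nxv2f32.nxv2i16 suffix (the wrapper name @sketch_vluxseg2_mask is hypothetical, not part of the test file); the trailing i64 1 is the newly added policy immediate, consistent with the tu -> ta change in the CHECK lines:

declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>, <vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @sketch_vluxseg2_mask(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
entry:
  ; merge operands, base pointer, index vector, mask, vl, then the new policy operand
  ; (1 selects the tail-agnostic behaviour reflected by the ta vsetvli above)
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}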
declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i64: @@ -12932,18 +12932,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: @@ -12967,18 +12967,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: @@ -13002,18 +13002,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: @@ -13037,18 +13037,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; 
CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i64: @@ -13072,18 +13072,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: @@ -13108,18 +13108,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: @@ -13144,18 +13144,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: @@ -13180,18 +13180,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i64: @@ -13216,18 +13216,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: @@ -13253,18 +13253,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: @@ -13290,18 +13290,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: @@ -13327,18 +13327,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i64: @@ -13364,18 +13364,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i64: @@ -13394,18 +13394,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret 
%1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: @@ -13424,18 +13424,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: @@ -13454,18 +13454,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: @@ -13484,18 +13484,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i64: @@ -13516,18 +13516,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: @@ -13548,18 +13548,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: @@ -13580,18 +13580,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: @@ -13612,18 +13612,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i64: @@ -13645,18 +13645,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: @@ -13678,18 +13678,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: @@ -13711,18 +13711,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: @@ -13744,18 +13744,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , 
, i64, i64) define @test_vluxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i64: @@ -13778,18 +13778,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: @@ -13812,18 +13812,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: @@ -13846,18 +13846,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: @@ -13880,18 +13880,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i64: @@ -13915,18 +13915,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: @@ -13950,18 +13950,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: @@ -13985,18 +13985,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: @@ -14020,18 +14020,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; 
CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i64: @@ -14056,18 +14056,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: @@ -14092,18 +14092,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: @@ -14128,18 +14128,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, 
half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: @@ -14164,18 +14164,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i64: @@ -14201,18 +14201,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: @@ -14238,18 +14238,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: @@ 
-14275,18 +14275,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: @@ -14312,18 +14312,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i64: @@ -14342,18 +14342,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: @@ -14372,18 +14372,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: @@ -14402,18 +14402,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: @@ -14432,18 +14432,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i64: @@ -14464,18 +14464,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: @@ -14496,18 +14496,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: @@ -14528,18 +14528,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8: @@ -14560,18 +14560,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i64: @@ -14593,18 +14593,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: @@ -14626,18 +14626,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: @@ -14659,18 +14659,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: @@ -14692,18 +14692,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i64: @@ -14726,18 +14726,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 
ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32: @@ -14760,18 +14760,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16: @@ -14794,18 +14794,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8: @@ -14828,18 +14828,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i64: @@ -14863,18 +14863,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, 
(a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32: @@ -14898,18 +14898,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16: @@ -14933,18 +14933,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8: @@ -14968,18 +14968,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , 
i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i64: @@ -15004,18 +15004,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32: @@ -15040,18 +15040,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16: @@ -15076,18 +15076,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8: @@ -15112,18 +15112,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: 
vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i64: @@ -15149,18 +15149,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32: @@ -15186,18 +15186,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16: @@ -15223,18 +15223,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare 
{,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8: @@ -15260,18 +15260,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16: @@ -15290,18 +15290,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8: @@ -15320,18 +15320,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i64: @@ -15350,18 +15350,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: 
vluxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32: @@ -15380,18 +15380,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16: @@ -15412,18 +15412,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8: @@ -15444,18 +15444,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv8f16_nxv8i64: @@ -15475,18 +15475,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32: @@ -15506,18 +15506,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16: @@ -15539,18 +15539,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8: @@ -15572,18 +15572,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 
ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i64: @@ -15604,18 +15604,18 @@ ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vmv2r.v v12, v6 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32: @@ -15637,18 +15637,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16: @@ -15667,18 +15667,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8: @@ -15697,18 +15697,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), 
v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i64: @@ -15727,18 +15727,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32: @@ -15757,18 +15757,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32: @@ -15787,18 +15787,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64, i64) define 
@test_vluxseg2_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8: @@ -15817,18 +15817,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16: @@ -15847,18 +15847,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(double*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i64: @@ -15877,18 +15877,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32: @@ -15909,18 +15909,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8: @@ -15941,18 +15941,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16: @@ -15973,18 +15973,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(double*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i64: @@ -16005,18 +16005,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32: @@ -16038,18 +16038,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; 
CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8: @@ -16071,18 +16071,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16: @@ -16104,18 +16104,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(double*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i64: @@ -16137,18 +16137,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64, i64) define 
@test_vluxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32: @@ -16167,18 +16167,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8: @@ -16197,18 +16197,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i64: @@ -16227,18 +16227,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16: @@ -16257,18 +16257,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, 
%mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32: @@ -16288,18 +16288,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8: @@ -16320,18 +16320,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i64: @@ -16351,18 +16351,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16: @@ -16383,18 +16383,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32: @@ -16416,18 +16416,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8: @@ -16449,18 +16449,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i64: @@ -16481,18 +16481,18 @@ ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16: @@ -16514,18 +16514,18 
@@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32: @@ -16548,18 +16548,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8: @@ -16582,18 +16582,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i64: @@ -16615,18 +16615,18 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } 
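; A minimal standalone sketch, not part of the generated diff above: the same masked call with the
; scalable-vector types spelled out. The trailing i64 is the new policy operand; these tests pass the
; immediate 1, which is assumed here to request tail-agnostic handling, matching the "ta" now emitted
; by vsetvli in the CHECK lines. The wrapper function name below is illustrative only.
declare {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x half> @sketch_vluxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
entry:
  ; Pass %val as all five passthru operands, then base, index, mask, vl, and the policy immediate.
  %0 = tail call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  ; Extract the second segment, as the surrounding tests do.
  %1 = extractvalue {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}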
declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16: @@ -16649,18 +16649,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32: @@ -16684,18 +16684,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8: @@ -16719,18 +16719,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i64: @@ -16754,18 +16754,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, 
(a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16: @@ -16789,18 +16789,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32: @@ -16825,18 +16825,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8: @@ -16861,18 +16861,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64(,,,,,,, 
half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i64: @@ -16897,18 +16897,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16: @@ -16933,18 +16933,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32: @@ -16970,18 +16970,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8: @@ -17007,18 +17007,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), 
v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i64: @@ -17044,18 +17044,18 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16: @@ -17081,18 +17081,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32: @@ -17111,18 +17111,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half*, , i64) -declare {,} 
@llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8: @@ -17141,18 +17141,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16: @@ -17171,18 +17171,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(half*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i64: @@ -17201,18 +17201,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32: @@ -17233,18 +17233,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, 
%index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8: @@ -17265,18 +17265,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16: @@ -17297,18 +17297,18 @@ ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(half*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i64: @@ -17328,18 +17328,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32: @@ -17361,18 +17361,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8: @@ -17394,18 +17394,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16: @@ -17427,18 +17427,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(half*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i64: @@ -17460,18 +17460,18 @@ ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64, i64) define 
@test_vluxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32: @@ -17494,18 +17494,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8: @@ -17528,18 +17528,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16: @@ -17562,18 +17562,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64) +declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i64: @@ -17596,18 +17596,18 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64( %val, 
%val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32: @@ -17631,18 +17631,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8: @@ -17666,18 +17666,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: @@ -17701,18 +17701,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i64: @@ -17736,18 +17736,18 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, 
v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: @@ -17772,18 +17772,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: @@ -17808,18 +17808,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: @@ -17844,18 +17844,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue 
{,,,,,,} %0, 1 ret %1 } declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i64: @@ -17880,18 +17880,18 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: @@ -17917,18 +17917,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: @@ -17954,18 +17954,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16: @@ -17991,18 +17991,18 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; 
CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i64: @@ -18028,18 +18028,18 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64, i64) define @test_vluxseg2_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: @@ -18058,18 +18058,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: @@ -18088,18 +18088,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, 
%mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i64: @@ -18118,18 +18118,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: @@ -18148,18 +18148,18 @@ ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64, i64) define @test_vluxseg3_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: @@ -18180,18 +18180,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64, i64) define @test_vluxseg3_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8: @@ -18212,18 +18212,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; 
CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64, i64) define @test_vluxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i64: @@ -18243,18 +18243,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float*, , i64) -declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64) +declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64, i64) define @test_vluxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16: @@ -18275,18 +18275,18 @@ ; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32: @@ -18308,18 +18308,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4f32_nxv4i8: @@ -18341,18 +18341,18 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i64: @@ -18374,18 +18374,18 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float*, , i64) -declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64) +declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16: @@ -18407,12 +18407,12 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) %1 = extractvalue {,,,} %0, 1 ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: 
vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 
%4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, 
v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw 
a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 
1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, 
v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry 
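; --- Illustrative sketch (editor's note, not part of the patch) ---
; After this change every masked vmax/vmaxu/vmin intrinsic carries a trailing
; XLen-typed policy operand after the AVL; the constant 1 passed throughout
; these tests requests the tail-agnostic state that the rewritten vsetvli
; CHECK lines ("ta" instead of "tu") expect. The scalable vector types and the
; exact mangled intrinsic name below are assumptions for illustration only,
; since the element types are elided in this hunk.
declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,  ; maskedoff (passthru)
  <vscale x 4 x i16>,  ; first source vector
  <vscale x 4 x i16>,  ; second source vector
  <vscale x 4 x i1>,   ; mask, placed in v0
  i64,                 ; AVL
  i64)                 ; policy immediate, 1 = tail agnostic

define <vscale x 4 x i16> @sketch_vmax_mask_vv(<vscale x 4 x i16> %passthru,
                                               <vscale x 4 x i16> %a,
                                               <vscale x 4 x i16> %b,
                                               <vscale x 4 x i1> %m,
                                               i64 %vl) nounwind {
entry:
  %r = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
              <vscale x 4 x i16> %passthru,
              <vscale x 4 x i16> %a,
              <vscale x 4 x i16> %b,
              <vscale x 4 x i1> %m,
              i64 %vl, i64 1)
  ret <vscale x 4 x i16> %r
}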
; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define 
@intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ 
-202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; 
CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 
+1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; 
CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; 
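; --- Illustrative sketch (editor's note, not part of the patch) ---
; On RV32 the i64 scalar operand does not fit in a single GPR, so these tests
; spill it to the stack and broadcast it with a stride-zero vlse64.v before the
; masked vmaxu. In the updated CHECK lines the broadcast is emitted under its
; own vsetivli while the masked vmaxu.vv itself runs under the original AVL
; with the tail-agnostic setting selected by the trailing i32 1 operand.
; The vector type and mangled intrinsic name below are assumptions for
; illustration only.
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
  <vscale x 2 x i64>,  ; maskedoff (passthru)
  <vscale x 2 x i64>,  ; vector source
  i64,                 ; scalar source (split across two GPRs on RV32)
  <vscale x 2 x i1>,   ; mask, placed in v0
  i32,                 ; AVL (XLen on RV32)
  i32)                 ; policy immediate, 1 = tail agnostic

define <vscale x 2 x i64> @sketch_vmaxu_mask_vx_rv32(<vscale x 2 x i64> %passthru,
                                                     <vscale x 2 x i64> %a,
                                                     i64 %b,
                                                     <vscale x 2 x i1> %m,
                                                     i32 %vl) nounwind {
entry:
  %r = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
              <vscale x 2 x i64> %passthru,
              <vscale x 2 x i64> %a,
              i64 %b,
              <vscale x 2 x i1> %m,
              i32 %vl, i32 1)
  ret <vscale x 2 x i64> %r
}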
CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define 
@intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ 
-1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define 
@intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, 
%2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 
1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, 
v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 
16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; 
CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define 
@intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, 
mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + 
i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define 
@intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: 
vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define 
@intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, 
i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define 
@intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , 
, , + i64, i64); define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx 
v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, 
- i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e8, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; 
CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret 
entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 
%4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, 
v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 
1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64( %0, 
%1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define 
@intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret 
entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 
+944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define 
@intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; 
CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, 
i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; 
CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define 
@intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -159,12 +162,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -204,12 +208,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -249,12 +254,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -293,12 +299,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, 
mf4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -337,12 +344,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -427,12 +436,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -472,12 +482,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -516,12 +527,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -561,12 +573,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -606,12 +619,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -651,12 +665,13 @@ , , , + i32, i32); define @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -695,12 +710,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -739,12 +755,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -783,12 +800,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,12 +846,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -873,12 +892,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -918,12 +938,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -962,12 +983,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1006,12 +1028,13 @@ , i32, , + i32, i32); define 
@intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1051,12 +1074,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1096,12 +1120,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1141,12 +1166,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1185,12 +1211,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1230,12 +1257,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1275,12 +1303,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1320,12 +1349,13 @@ , i32, , + i32, i32); define @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 
+1364,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1617,7 +1647,7 @@ define 
@intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, 
v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -159,12 +162,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -204,12 +208,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -249,12 +254,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -293,12 +299,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -337,12 +344,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -427,12 +436,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -472,12 +482,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -516,12 +527,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -561,12 +573,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -606,12 +619,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -651,12 +665,13 @@ , , , + i64, i64); define @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -695,12 +710,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -739,12 +755,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -783,12 +800,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,12 +846,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -873,12 +892,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -918,12 +938,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -962,12 +983,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1006,12 +1028,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1051,12 +1074,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1096,12 +1120,13 @@ , i64, , + i64, i64); define 
@intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1141,12 +1166,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1185,12 +1211,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1230,12 +1257,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1275,12 +1303,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1320,12 +1349,13 @@ , i64, , + i64, i64); define @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } 
@@ -1421,7 +1451,7 @@ define @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1617,7 +1647,7 @@ define @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: 
@@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -159,12 +162,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -204,12 +208,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -249,12 +254,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -293,12 +299,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -337,12 +344,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -427,12 +436,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -472,12 +482,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -516,12 +527,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -561,12 +573,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -606,12 +619,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -651,12 +665,13 @@ , , , + i32, i32); define @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -695,12 +710,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -739,12 +755,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -783,12 +800,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,12 +846,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -873,12 +892,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -918,12 +938,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -962,12 +983,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1006,12 +1028,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1051,12 +1074,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1096,12 +1120,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1141,12 +1166,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a 
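Every masked vnclip/vnclipu/vnsra hunk in these test files applies the same mechanical update: the intrinsic declaration gains one extra XLen-sized trailing operand, each call site passes the constant 1 for it, and the expected vsetvli in the CHECK lines flips from "tu, mu" to "ta, mu" (evidently a tail-policy selector, given that change). A minimal sketch of one updated vector-shift (wv) test in that shape follows; the mangled intrinsic name, register assignments, and LMUL are inferred from the surrounding test naming convention rather than copied from a specific hunk.

; Sketch only: RV32 flavor (XLen = i32), nxv1i8/nxv1i16 type pair as in the
; first vnclipu tests of this file. Not taken verbatim from the patch.
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,   ; maskedoff / merge operand
  <vscale x 1 x i16>,  ; wide source
  <vscale x 1 x i8>,   ; shift amounts
  <vscale x 1 x i1>,   ; mask
  i32,                 ; vl
  i32)                 ; new trailing operand added by this patch

define <vscale x 1 x i8> @sketch_vnclipu_mask_wv(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: sketch_vnclipu_mask_wv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  ; The call now passes i32 1 as the extra final argument, matching the
  ; "+ i32 %4, i32 1" lines in the hunks above.
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
      <vscale x 1 x i8> %0,
      <vscale x 1 x i16> %1,
      <vscale x 1 x i8> %2,
      <vscale x 1 x i1> %3,
      i32 %4, i32 1)
  ret <vscale x 1 x i8> %a
}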
} @@ -1185,12 +1211,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1230,12 +1257,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1275,12 +1303,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1320,12 +1349,13 @@ , i32, , + i32, i32); define @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 
1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1617,7 +1647,7 @@ define @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1748,7 +1778,7 @@ define 
@intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -159,12 +162,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -204,12 +208,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -249,12 +254,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -293,12 +299,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -337,12 +344,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -427,12 +436,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -472,12 +482,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -516,12 +527,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -561,12 +573,13 @@ , , , + i64, i64); define 
@intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -606,12 +619,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -651,12 +665,13 @@ , , , + i64, i64); define @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -695,12 +710,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -739,12 +755,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -783,12 +800,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,12 +846,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -873,12 +892,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 
+907,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -918,12 +938,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -962,12 +983,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1006,12 +1028,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1051,12 +1074,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1096,12 +1120,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1141,12 +1166,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1185,12 +1211,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1230,12 +1257,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1275,12 +1303,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1320,12 +1349,13 @@ , i64, , + i64, i64); define @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, 
ta, mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1617,7 +1647,7 @@ define @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret 
entry: @@ -1790,7 +1820,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -159,12 +162,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -204,12 +208,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -249,12 +254,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -293,12 +299,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { 
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -337,12 +344,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -427,12 +436,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -472,12 +482,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -516,12 +527,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -561,12 +573,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -606,12 +619,13 @@ , , , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -651,12 +665,13 @@ , 
, , + i32, i32); define @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -695,12 +710,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -739,12 +755,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -783,12 +800,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,12 +846,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -873,12 +892,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -918,12 +938,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -962,12 +983,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i32 %2, %3, - i32 %4) + 
i32 %4, i32 1) ret %a } @@ -1006,12 +1028,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1051,12 +1074,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1096,12 +1120,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1141,12 +1166,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1185,12 +1211,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1230,12 +1257,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1275,12 +1303,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1320,12 +1349,13 @@ , i32, , + i32, i32); define @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wx 
v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1617,7 +1647,7 @@ define 
@intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret 
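The vnsra-rv64.ll hunks repeat the same update with i64 as the XLen type. For the scalar-shift (wx) form, an updated test looks roughly like the sketch below; the ".i64" suffix in the mangled name and the register choices are assumptions based on the test naming convention, not copied from a particular hunk.

; Sketch only: masked vnsra with a scalar shift amount, RV64 flavor (i64).
declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64(
  <vscale x 1 x i8>, <vscale x 1 x i16>, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @sketch_vnsra_mask_wx(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: sketch_vnsra_mask_wx:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  ; As in the vector-shift form, the only IR-level change is the extra
  ; trailing constant (here i64 1) after the vl operand.
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64(
      <vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2,
      <vscale x 1 x i1> %3, i64 %4, i64 1)
  ret <vscale x 1 x i8> %a
}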
entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -159,12 +162,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -204,12 +208,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -249,12 +254,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -293,12 +299,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -337,12 +344,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -427,12 +436,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -472,12 +482,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -516,12 +527,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -561,12 +573,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -606,12 +619,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -651,12 +665,13 @@ , , , + i64, i64); define @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -695,12 +710,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -739,12 +755,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -783,12 +800,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,12 +846,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -873,12 +892,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -918,12 +938,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -962,12 +983,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1006,12 +1028,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1051,12 +1074,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1096,12 +1120,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1141,12 +1166,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1185,12 +1211,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1230,12 +1257,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1275,12 +1303,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1320,12 +1349,13 @@ , i64, , + i64, i64); define @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1617,7 +1647,7 @@ define @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -159,12 +162,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -204,12 +208,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -249,12 +254,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -293,12 +299,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -337,12 +344,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -382,12 +390,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -427,12 +436,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -472,12 +482,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ 
-516,12 +527,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -561,12 +573,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -606,12 +619,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -651,12 +665,13 @@ , , , + i32, i32); define @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -695,12 +710,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -739,12 +755,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -783,12 +800,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -828,12 +846,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 
+861,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -873,12 +892,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -918,12 +938,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -962,12 +983,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1006,12 +1028,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1051,12 +1074,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1096,12 +1120,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1141,12 +1166,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1185,12 +1211,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1230,12 +1257,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1275,12 +1303,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1320,12 +1349,13 @@ , i32, , + i32, i32); define @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret 
entry: @@ -1496,7 +1526,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1617,7 +1647,7 @@ define @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1781,7 +1811,7 @@ define 
@intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -159,12 +162,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -173,7 +177,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -204,12 +208,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -218,7 +223,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -249,12 +254,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu 
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +269,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -293,12 +299,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -307,7 +314,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -337,12 +344,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -351,7 +359,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -382,12 +390,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -396,7 +405,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -427,12 +436,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -441,7 +451,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -472,12 +482,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -486,7 +497,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -516,12 +527,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -530,7 +542,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -561,12 +573,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -575,7 +588,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -606,12 +619,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t ; CHECK-NEXT: ret entry: @@ -620,7 +634,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -651,12 +665,13 @@ , , , + i64, i64); define @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t ; CHECK-NEXT: ret entry: @@ -665,7 +680,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -695,12 +710,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -709,7 +725,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -739,12 +755,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -753,7 +770,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -783,12 +800,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -797,7 +815,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -828,12 +846,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +861,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -873,12 +892,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +907,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -918,12 +938,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +953,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -962,12 +983,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32( 
%0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -976,7 +998,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1006,12 +1028,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1020,7 +1043,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1051,12 +1074,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1065,7 +1089,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1096,12 +1120,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1110,7 +1135,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1141,12 +1166,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1181,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1185,12 +1211,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1199,7 +1226,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1230,12 +1257,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1244,7 +1272,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1275,12 +1303,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1289,7 +1318,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ 
-1320,12 +1349,13 @@ , i64, , + i64, i64); define @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1334,7 +1364,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1357,7 +1387,7 @@ define @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1366,7 +1396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1389,7 +1419,7 @@ define @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1398,7 +1428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1421,7 +1451,7 @@ define @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1454,7 +1484,7 @@ define @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1463,7 +1493,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1487,7 +1517,7 @@ define @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1526,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1520,7 +1550,7 @@ define @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1529,7 +1559,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1552,7 +1582,7 @@ define @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1561,7 +1591,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1584,7 +1614,7 @@ define @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1593,7 +1623,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1617,7 +1647,7 @@ define @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1626,7 +1656,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1683,7 +1713,7 @@ define @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1692,7 +1722,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1715,7 +1745,7 @@ define @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1754,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1748,7 +1778,7 @@ define @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1787,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1781,7 +1811,7 @@ define @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1790,7 +1820,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1814,7 +1844,7 @@ define @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1853,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define 
@intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ 
-379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, 
v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, 
i32); define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2055,7 @@ define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2064,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2087,7 @@ define 
@intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2096,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2119,7 @@ define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2128,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2151,7 @@ define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2160,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2183,7 @@ define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2192,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2215,7 @@ define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2224,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2247,7 @@ define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2212,7 +2256,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2235,7 +2279,7 @@ define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2288,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2311,7 @@ define @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2320,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2343,7 @@ define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu 
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2352,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2375,7 @@ define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2384,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2407,7 @@ define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2416,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2439,7 @@ define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2448,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2471,7 @@ define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2480,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2503,7 @@ define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2512,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2535,7 @@ define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2544,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2567,7 @@ define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2599,7 @@ define @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2608,7 @@ %1, i32 9, %2, - i32 %3) 
+ i32 %3, i32 1) ret %a } @@ -2587,7 +2631,7 @@ define @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2640,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2663,7 @@ define @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2672,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2695,7 @@ define @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2704,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2727,7 @@ define @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2736,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: 
vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, 
i64); define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 
+1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 
+2227,7 @@ define @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define 
@intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret 
entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 
+1836,7 @@ , i64, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , 
, , + i64, i64); define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx 
v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, 
- i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e8, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; 
CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret 
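; All of the masked tests updated in this patch follow the same pattern as the
; surrounding hunks: the masked intrinsic takes a trailing XLen policy operand,
; the tests pass 1 (tail agnostic), and the expected vsetvli switches from
; "tu, mu" to "ta, mu".  A minimal before/after sketch for the nxv1i64 vremu.vv
; case nearby, assuming the usual RVV intrinsic name mangling (illustrative
; only, not a generated FileCheck line):
;
;   before:  %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
;              <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2,
;              <vscale x 1 x i1> %3, i64 %4)
;
;   after:   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
;              <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2,
;              <vscale x 1 x i1> %3, i64 %4, i64 1)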
entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 
%4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8( 
%0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,13 +303,14 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -312,7 +319,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -343,12 +350,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -357,7 +365,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -388,12 +396,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -402,7 +411,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -433,12 +442,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +457,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -478,12 +488,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -492,7 +503,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -523,12 +534,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -537,7 +549,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -568,13 +580,14 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -614,12 +627,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -628,7 +642,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -659,12 +673,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -673,7 +688,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -704,12 +719,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -718,7 +734,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -749,12 +765,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -763,7 +780,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -794,13 +811,14 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -809,7 +827,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -840,12 +858,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -854,7 +873,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -885,12 +904,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -899,7 +919,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -930,12 +950,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( 
%0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +965,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -975,12 +996,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -989,7 +1011,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1020,12 +1042,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1057,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1065,13 +1088,14 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1080,7 +1104,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1111,12 +1135,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1125,7 +1150,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1156,12 +1181,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1170,7 +1196,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1201,12 +1227,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1215,7 +1242,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1246,12 +1273,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, 
mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1260,7 +1288,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1291,13 +1319,14 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1306,7 +1335,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1337,12 +1366,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1351,7 +1381,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1382,12 +1412,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1396,7 +1427,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1427,12 +1458,13 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1441,7 +1473,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1472,13 +1504,14 @@ , , , + i32, i32); define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1487,7 +1520,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1518,12 +1551,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1532,7 +1566,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1563,12 +1597,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1577,7 +1612,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1608,12 +1643,13 @@ , i32, , + i32, i32); define 
@intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1622,7 +1658,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1653,12 +1689,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1667,7 +1704,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1698,12 +1735,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1712,7 +1750,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1743,12 +1781,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1757,7 +1796,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1788,12 +1827,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1802,7 +1842,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1833,12 +1873,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1847,7 +1888,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1878,12 +1919,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1934,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1923,12 +1965,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1937,7 +1980,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1968,12 +2011,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1982,7 +2026,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2013,12 +2057,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2027,7 +2072,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2058,12 +2103,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2072,7 +2118,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2103,12 +2149,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2117,7 +2164,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2148,12 +2195,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2162,7 +2210,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2193,12 +2241,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2207,7 +2256,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2238,12 +2287,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2252,7 +2302,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2283,12 +2333,13 @@ , i32, , + i32, i32); define 
@intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2297,7 +2348,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2328,12 +2379,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2342,7 +2394,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2373,12 +2425,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2387,7 +2440,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2418,12 +2471,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2432,7 +2486,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2463,12 +2517,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2477,7 +2532,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2508,12 +2563,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2522,7 +2578,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2553,12 +2609,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2567,7 +2624,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2598,12 +2655,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2612,7 +2670,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2643,12 +2701,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2657,7 +2716,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2688,12 +2747,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2702,7 +2762,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2733,12 +2793,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2747,7 +2808,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2778,12 +2839,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2792,7 +2854,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2823,12 +2885,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2837,7 +2900,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2868,12 +2931,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2882,7 +2946,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2913,12 +2977,13 @@ , i32, , + i32, i32); define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2927,7 +2992,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2958,12 +3023,13 @@ , i32, , + i32, i32); define 
@intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2972,7 +3038,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2996,7 +3062,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3005,7 +3071,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3029,7 +3095,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3038,7 +3104,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3062,7 +3128,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3071,7 +3137,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3095,7 +3161,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3104,7 +3170,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3128,7 +3194,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3137,7 +3203,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3161,7 +3227,7 @@ define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3170,7 +3236,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3194,7 +3260,7 @@ define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3203,7 +3269,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3227,7 +3293,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32( %0, %1, %2, i32 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3236,7 +3302,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3260,7 +3326,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3269,7 +3335,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3293,7 +3359,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3302,7 +3368,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3326,7 +3392,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3335,7 +3401,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3359,7 +3425,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3368,7 +3434,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3392,7 +3458,7 @@ define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3401,7 +3467,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3425,7 +3491,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3434,7 +3500,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3458,7 +3524,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3467,7 +3533,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3491,7 +3557,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3500,7 +3566,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3524,7 +3590,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3533,7 +3599,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3557,7 +3623,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3566,7 +3632,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3590,7 +3656,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3599,7 +3665,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3623,7 +3689,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3632,7 +3698,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3656,7 +3722,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3665,7 +3731,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3689,7 +3755,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3698,7 +3764,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3722,7 +3788,7 @@ define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3731,7 +3797,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3755,7 +3821,7 @@ define @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3764,7 +3830,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3788,7 +3854,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3797,7 +3863,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3821,7 +3887,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3830,7 +3896,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3854,7 +3920,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3863,7 +3929,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3887,7 +3953,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3896,7 +3962,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3920,7 +3986,7 @@ define @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3929,7 +3995,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3953,7 +4019,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3962,7 +4028,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -3986,7 +4052,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3995,7 +4061,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -4019,7 +4085,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4028,7 +4094,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -4052,7 +4118,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4061,7 +4127,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,13 +303,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -312,7 +319,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -343,12 +350,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -357,7 +365,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -388,12 +396,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -402,7 +411,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -433,12 +442,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +457,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -478,12 +488,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -492,7 +503,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -523,12 +534,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -537,7 +549,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -568,13 +580,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -583,7 +596,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -614,12 +627,13 @@ , , , + i64, i64); define 
@intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -628,7 +642,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -659,12 +673,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -673,7 +688,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -704,12 +719,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -718,7 +734,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -749,12 +765,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -763,7 +780,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -794,13 +811,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -809,7 +827,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -840,12 +858,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -854,7 +873,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -885,12 +904,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -899,7 +919,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -930,12 +950,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +965,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -975,13 +996,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -990,7 +1012,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1021,12 +1043,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1035,7 +1058,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1066,12 +1089,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1080,7 +1104,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1111,12 +1135,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1125,7 +1150,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1156,12 +1181,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1170,7 +1196,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1201,12 +1227,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1215,7 +1242,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1246,13 +1273,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1261,7 +1289,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1292,12 +1320,13 
@@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1306,7 +1335,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1337,12 +1366,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1351,7 +1381,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1382,12 +1412,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1396,7 +1427,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1427,12 +1458,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1441,7 +1473,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1472,13 +1504,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1487,7 +1520,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1518,12 +1551,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1532,7 +1566,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1563,12 +1597,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1577,7 +1612,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1608,12 +1643,13 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1622,7 +1658,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1653,13 +1689,14 @@ , , , + i64, i64); define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1705,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1699,12 +1736,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1713,7 +1751,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1744,12 +1782,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1758,7 +1797,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1789,12 +1828,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1803,7 +1843,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1874,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1889,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1879,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1893,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1924,12 +1966,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1938,7 +1981,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1969,12 +2012,13 
@@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1983,7 +2027,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2014,12 +2058,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2028,7 +2073,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2059,12 +2104,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2073,7 +2119,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2104,12 +2150,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2118,7 +2165,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2149,12 +2196,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2163,7 +2211,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2194,12 +2242,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2208,7 +2257,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2239,12 +2288,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2253,7 +2303,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2284,12 +2334,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2298,7 +2349,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2329,12 +2380,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2343,7 +2395,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2374,12 +2426,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2388,7 +2441,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2419,12 +2472,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2433,7 +2487,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2464,12 +2518,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2478,7 +2533,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2509,12 +2564,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2523,7 +2579,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2554,12 +2610,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2568,7 +2625,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2599,12 +2656,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2613,7 +2671,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2644,12 +2702,13 @@ , i64, , + i64, 
i64); define @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2658,7 +2717,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2689,12 +2748,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2703,7 +2763,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2734,12 +2794,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2748,7 +2809,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2779,12 +2840,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2793,7 +2855,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2824,12 +2886,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2838,7 +2901,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2869,12 +2932,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2883,7 +2947,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2914,12 +2978,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2928,7 +2993,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -2959,12 +3024,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -2973,7 +3039,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3004,12 +3070,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3018,7 +3085,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3049,12 +3116,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3063,7 +3131,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3094,12 +3162,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3108,7 +3177,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3139,12 +3208,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3153,7 +3223,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3184,12 +3254,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3198,7 +3269,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3229,12 +3300,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3243,7 +3315,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3274,12 +3346,13 @@ , i64, , + i64, i64); define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3288,7 +3361,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3319,12 +3392,13 @@ , i64, , + i64, i64); define 
@intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -3333,7 +3407,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -3357,7 +3431,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3366,7 +3440,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3390,7 +3464,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3399,7 +3473,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3423,7 +3497,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3432,7 +3506,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3456,7 +3530,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3465,7 +3539,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3489,7 +3563,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3498,7 +3572,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3522,7 +3596,7 @@ define @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3531,7 +3605,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3555,7 +3629,7 @@ define @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3564,7 +3638,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3588,7 +3662,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64( %0, %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3597,7 +3671,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3621,7 +3695,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3630,7 +3704,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3654,7 +3728,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3663,7 +3737,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3687,7 +3761,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3696,7 +3770,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3720,7 +3794,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3729,7 +3803,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3753,7 +3827,7 @@ define @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3762,7 +3836,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3786,7 +3860,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3795,7 +3869,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3819,7 +3893,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3828,7 +3902,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3852,7 +3926,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3861,7 +3935,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3885,7 +3959,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3894,7 +3968,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3918,7 +3992,7 @@ define @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3927,7 +4001,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3951,7 +4025,7 @@ define @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3960,7 +4034,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -3984,7 +4058,7 @@ define @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -3993,7 +4067,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4017,7 +4091,7 @@ define @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4026,7 +4100,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4050,7 +4124,7 @@ define @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4059,7 +4133,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4083,7 +4157,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4092,7 +4166,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4116,7 +4190,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4125,7 +4199,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4149,7 +4223,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4158,7 +4232,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4182,7 +4256,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4191,7 +4265,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4215,7 +4289,7 @@ define @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4224,7 +4298,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4248,7 +4322,7 @@ define @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4257,7 +4331,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4281,7 +4355,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4290,7 +4364,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4314,7 +4388,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4323,7 +4397,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4347,7 +4421,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4356,7 +4430,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4380,7 +4454,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4389,7 +4463,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4413,7 +4487,7 @@ define @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4422,7 +4496,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4446,7 +4520,7 @@ define @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4455,7 +4529,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4479,7 +4553,7 @@ define @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4488,7 +4562,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4512,7 +4586,7 @@ define @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4521,7 +4595,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -4545,7 +4619,7 @@ define @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -4554,7 +4628,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; 
CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,13 +533,14 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -537,7 +549,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -568,12 +580,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -582,7 +595,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -613,12 +626,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -627,7 +641,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -658,12 +672,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -672,7 +687,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -703,13 +718,14 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -718,7 +734,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -749,12 +765,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -763,7 +780,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -794,13 +811,14 @@ 
, , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl2re16.v v26, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: ret entry: @@ -809,7 +827,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -840,12 +858,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -854,7 +873,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -885,12 +904,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -899,7 +919,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -930,12 +950,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +965,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -975,12 +996,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -989,7 +1011,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1020,12 +1042,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1057,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1065,13 +1088,14 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1080,7 +1104,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1111,12 +1135,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1125,7 +1150,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1156,12 +1181,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1170,7 +1196,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1201,12 +1227,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1215,7 +1242,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1246,13 +1273,14 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -1261,7 +1289,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1292,12 +1320,13 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1306,7 +1335,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1337,13 +1366,14 @@ , , , + i32, i32); define @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl2re16.v v26, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: ret entry: @@ -1352,7 +1382,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu 
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,13 +533,14 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -537,7 +549,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -568,12 +580,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -582,7 +595,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -613,12 +626,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -627,7 +641,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -658,12 +672,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -672,7 +687,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -703,13 +718,14 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -718,7 +734,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -749,12 +765,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret 
entry: @@ -763,7 +780,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -794,13 +811,14 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl2re16.v v26, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: ret entry: @@ -809,7 +827,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -840,12 +858,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -854,7 +873,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -885,12 +904,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -899,7 +919,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -930,12 +950,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -944,7 +965,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -975,12 +996,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -989,7 +1011,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1020,12 +1042,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1034,7 +1057,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1065,13 +1088,14 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -1080,7 +1104,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1111,12 +1135,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1125,7 +1150,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1156,12 +1181,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1170,7 +1196,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1201,12 +1227,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1215,7 +1242,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1246,13 +1273,14 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -1261,7 +1289,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1292,12 +1320,13 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1306,7 +1335,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1337,13 +1366,14 @@ , , , + i64, i64); define @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl2re16.v v26, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: ret entry: @@ -1352,7 +1382,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll @@ -26,12 +26,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,12 +296,13 @@ , i8, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -304,7 +311,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -334,12 +341,13 @@ , i16, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -378,12 +386,13 @@ , i16, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -392,7 +401,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -422,12 +431,13 @@ , i16, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +446,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -466,12 +476,13 @@ , i16, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -480,7 +491,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -510,12 +521,13 @@ , i16, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -524,7 +536,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -554,12 +566,13 @@ , i16, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +581,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -598,12 +611,13 @@ , i32, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -612,7 +626,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -642,12 +656,13 @@ , i32, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +671,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -686,12 +701,13 @@ , i32, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +716,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -730,12 +746,13 @@ , i32, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -744,7 +761,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } 
@@ -774,12 +791,13 @@ , i32, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -788,7 +806,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -824,6 +842,7 @@ , i64, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -832,10 +851,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v25, v9, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -845,7 +864,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -881,6 +900,7 @@ , i64, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -889,10 +909,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v26, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -902,7 +922,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -938,6 +958,7 @@ , i64, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -946,10 +967,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v28, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -959,7 +980,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -995,6 +1016,7 @@ , i64, , + i32, i32); define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1003,10 +1025,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v24, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1016,7 +1038,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1039,7 +1061,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1048,7 +1070,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 
1) ret %a } @@ -1071,7 +1093,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1080,7 +1102,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1103,7 +1125,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1112,7 +1134,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1135,7 +1157,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1166,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1167,7 +1189,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1176,7 +1198,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1199,7 +1221,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1208,7 +1230,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1231,7 +1253,7 @@ define @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1240,7 +1262,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1263,7 +1285,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1272,7 +1294,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1295,7 +1317,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1304,7 +1326,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1327,7 +1349,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1358,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1359,7 +1381,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1368,7 +1390,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1391,7 +1413,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1400,7 +1422,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1423,7 +1445,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1432,7 +1454,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1455,7 +1477,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1464,7 +1486,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1487,7 +1509,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1518,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1519,7 +1541,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1528,7 +1550,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1551,7 +1573,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1560,7 +1582,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1583,7 +1605,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1592,7 +1614,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1615,7 +1637,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1624,7 +1646,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1647,7 +1669,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1656,7 +1678,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1679,7 +1701,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1688,7 +1710,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1711,7 +1733,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1720,7 +1742,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll @@ -26,12 +26,13 @@ , i8, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , i8, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , i8, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , i8, , + i64, i64); define 
@intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , i8, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , i8, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,12 +296,13 @@ , i8, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -304,7 +311,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -334,12 +341,13 @@ , i16, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -378,12 +386,13 @@ , i16, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -392,7 +401,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -422,12 +431,13 @@ , i16, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +446,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -466,12 +476,13 @@ , i16, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -480,7 +491,7 @@ %1, i16 %2, %3, 
- i64 %4) + i64 %4, i64 1) ret %a } @@ -510,12 +521,13 @@ , i16, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -524,7 +536,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -554,12 +566,13 @@ , i16, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +581,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -598,12 +611,13 @@ , i32, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -612,7 +626,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -642,12 +656,13 @@ , i32, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +671,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -686,12 +701,13 @@ , i32, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +716,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -730,12 +746,13 @@ , i32, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -744,7 +761,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -774,12 +791,13 @@ , i32, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -788,7 +806,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -818,12 +836,13 @@ , i64, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -832,7 +851,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -862,12 +881,13 @@ , i64, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +896,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -906,12 +926,13 @@ , i64, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -920,7 +941,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -950,12 +971,13 @@ , i64, , + i64, i64); define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -964,7 +986,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -987,7 +1009,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -996,7 +1018,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1019,7 +1041,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1028,7 +1050,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1051,7 +1073,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1060,7 +1082,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1083,7 +1105,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1092,7 +1114,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1115,7 +1137,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, 9, v0.t 
; CHECK-NEXT: ret entry: @@ -1124,7 +1146,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1147,7 +1169,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1156,7 +1178,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1179,7 +1201,7 @@ define @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1210,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1211,7 +1233,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1220,7 +1242,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1243,7 +1265,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1252,7 +1274,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1275,7 +1297,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1284,7 +1306,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1307,7 +1329,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1316,7 +1338,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1339,7 +1361,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1348,7 +1370,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1371,7 +1393,7 @@ define @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1380,7 +1402,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1403,7 +1425,7 @@ 
define @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1412,7 +1434,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1435,7 +1457,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1444,7 +1466,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1467,7 +1489,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1476,7 +1498,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1499,7 +1521,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1508,7 +1530,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1531,7 +1553,7 @@ define @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1562,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1563,7 +1585,7 @@ define @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1572,7 +1594,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1595,7 +1617,7 @@ define @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1604,7 +1626,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1627,7 +1649,7 @@ define @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1636,7 +1658,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1659,7 +1681,7 @@ define @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1690,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu 
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) 
+ i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, 
v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, 
ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2055,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2064,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2087,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2096,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2119,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2128,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2151,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2160,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2183,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2192,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2215,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2224,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2247,7 @@ define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2212,7 +2256,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2235,7 +2279,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2288,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2311,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2320,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2343,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2352,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2375,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2384,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2407,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2416,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2439,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2448,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2471,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2480,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2503,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2512,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2535,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, 
v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2544,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2567,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2599,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2608,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2587,7 +2631,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2640,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2663,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2672,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2695,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2704,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2727,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2736,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 
%4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + 
i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a 
} @@ -2087,7 +2131,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, 
mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 
@@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu 
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) 
+ i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; 
CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2055,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2064,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2087,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2096,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2119,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2128,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2151,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2160,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2183,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2192,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2215,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2224,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2247,7 @@ define @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, 
mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2212,7 +2256,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2235,7 +2279,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2288,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2311,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2320,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2343,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2352,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2375,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2384,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2407,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2416,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2439,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2448,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2471,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2480,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2503,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, 
v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2512,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2535,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2544,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2567,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2599,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2608,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2587,7 +2631,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2640,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2663,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2672,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2695,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2704,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2727,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2736,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, 
%2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ 
-702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define 
@intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: 
vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 
+2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ 
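; The vsaddu hunks above and below all apply the same mechanical update: the
; masked intrinsic gains a trailing XLen policy operand, the call sites pass
; the immediate 1 (tail agnostic), and the expected vsetvli flips from
; "tu, mu" to "ta, mu".  A minimal self-contained sketch of the updated form,
; assuming the nxv1i8 .vx variant and the usual type-suffix name mangling
; (the exact mangled name and the @sketch_* wrapper are illustrative, not
; copied from the diff):
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(
  <vscale x 1 x i8>,   ; maskedoff (inactive/tail source)
  <vscale x 1 x i8>,   ; vector operand
  i8,                  ; scalar operand
  <vscale x 1 x i1>,   ; mask (v0)
  i64,                 ; vl
  i64)                 ; policy; must be an immediate, 1 = tail agnostic

define <vscale x 1 x i8> @sketch_vsaddu_mask(<vscale x 1 x i8> %maskedoff,
    <vscale x 1 x i8> %v, i8 %x, <vscale x 1 x i1> %m, i64 %vl) nounwind {
  ; Passing policy 1 is what lets the backend emit
  ;   vsetvli zero, a1, e8, mf8, ta, mu
  ; instead of the previous "tu, mu" before vsaddu.vx v8, v9, a0, v0.t.
  %r = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(
          <vscale x 1 x i8> %maskedoff, <vscale x 1 x i8> %v, i8 %x,
          <vscale x 1 x i1> %m, i64 %vl, i64 1)
  ret <vscale x 1 x i8> %r
}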
define @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -11,7 +11,7 @@ , *, , - i64) + i64, i64) define @test1(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: test1: @@ -46,7 +46,7 @@ define @test3(i64 %avl, %a, * %b, %c) nounwind { ; CHECK-LABEL: test3: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a1), v0.t ; CHECK-NEXT: ret entry: @@ -55,7 +55,7 @@ %a, * %b, %c, - i64 %0) + i64 %0, i64 1) ret %1 } @@ -63,7 +63,7 @@ define @test4(i64 %avl, %a, * %b, %c) nounwind { ; CHECK-LABEL: test4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a1), v0.t ; CHECK-NEXT: ret entry: @@ -72,7 +72,7 @@ %a, * %b, %c, - i64 %avl) + i64 %avl, i64 1) ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf8_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf8_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf8_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf8_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , 
, + i32, i32); define @intrinsic_vsext_mask_vf4_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf4_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -639,12 +654,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -652,7 +668,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -680,12 +696,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -693,7 +710,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -721,12 +738,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -734,7 +752,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -762,12 +780,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -775,7 +794,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -803,12 +822,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -816,7 +836,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -844,12 +864,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -857,7 +878,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -885,12 +906,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -898,7 +920,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -926,12 +948,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +962,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -967,12 +990,13 @@ , , , + i32, i32); define @intrinsic_vsext_mask_vf2_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -980,7 +1004,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf8_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf8_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf8_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf8_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, 
tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 
+528,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf4_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -639,12 +654,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -652,7 +668,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -680,12 +696,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -693,7 +710,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -721,12 +738,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -734,7 +752,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -762,12 +780,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -775,7 +794,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -803,12 +822,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -816,7 +836,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -844,12 +864,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -857,7 +878,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -885,12 +906,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -898,7 +920,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -926,12 +948,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +962,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -967,12 +990,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -980,7 +1004,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1008,12 +1032,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1021,7 +1046,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1049,12 +1074,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1062,7 +1088,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1090,12 +1116,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1103,7 +1130,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1131,12 +1158,13 @@ , , , + i64, i64); define @intrinsic_vsext_mask_vf2_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1172,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll @@ -26,12 +26,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,12 +296,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -304,7 +311,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -334,12 +341,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ %1, i16 
%2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -378,12 +386,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -392,7 +401,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -422,12 +431,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +446,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -466,12 +476,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -480,7 +491,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -510,12 +521,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -524,7 +536,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -554,12 +566,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +581,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -598,12 +611,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -612,7 +626,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -642,12 +656,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +671,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -686,12 +701,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +716,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -730,12 +746,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -744,7 +761,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -774,12 +791,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -788,7 +806,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -820,6 +838,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -838,7 +857,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -870,6 +889,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -888,7 +908,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -920,6 +940,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -938,7 +959,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -970,6 +991,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -988,7 +1010,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -26,12 +26,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,12 +296,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -304,7 +311,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -334,12 +341,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -348,7 +356,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -378,12 +386,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -392,7 +401,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -422,12 +431,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -436,7 +446,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -466,12 +476,13 @@ , i16, , + i64, i64); define 
@intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -480,7 +491,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -510,12 +521,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -524,7 +536,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -554,12 +566,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -568,7 +581,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -598,12 +611,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -612,7 +626,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -642,12 +656,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +671,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -686,12 +701,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +716,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -730,12 +746,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -744,7 +761,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -774,12 +791,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -788,7 +806,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -818,12 +836,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -832,7 +851,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -862,12 +881,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -876,7 +896,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -906,12 +926,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -920,7 +941,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -950,12 +971,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -964,7 +986,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -27,12 +27,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , i8, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, 
i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , i16, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i32, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, 
a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -838,6 +856,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -856,7 +875,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -888,6 +907,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -906,7 +926,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -938,6 +958,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -956,7 +977,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -988,6 +1009,7 @@ , i64, , + i32, i32); define @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1006,7 +1028,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -27,12 +27,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , i8, , + i64, i64); define 
@intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , i8, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , i16, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i32, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i64, , + i64, i64); define 
@intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i64, , + i64, i64); define @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define 
@intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: 
@@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; 
CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1790,12 +1830,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1834,12 +1875,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1878,12 +1920,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1922,12 +1965,13 @@ , i32, , + i32, i32); define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 
+2268,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32( 
%0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) 
+ i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: 
vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i64, , + i64, i64); define 
@intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) 
ret %a } @@ -1526,12 +1560,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret 
entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define 
@intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , 
, , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; 
CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); 
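; ----------------------------------------------------------------------------
; Illustrative note, comments only (not part of this patch): the masked vsmul
; tests follow the same recipe as vsll above. With the old five-operand
; signature the masked pseudo had to preserve tail elements, so the scheduled
; vsetvli used "tu, mu"; once the call site passes the extra policy immediate
; (i32 1 on RV32, i.e. tail agnostic), the backend may emit "ta, mu" instead.
; A hedged sketch of the resulting check-line change for one vx test, with
; SEW/LMUL and registers taken from the nxv1i8 case above:
;
;   before:  vsetvli zero, a1, e8, mf8, tu, mu
;   after:   vsetvli zero, a1, e8, mf8, ta, mu
;            vsmul.vx v8, v9, a0, v0.t
; ----------------------------------------------------------------------------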
define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, 
v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define 
@intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, 
%2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( 
%0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret 
%a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) 
ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, 
v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, 
i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i32, , + i32, i32); 
define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1790,12 +1830,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i32 %2, %3, - i32 %4) 
+ i32 %4, i32 1) ret %a } @@ -1834,12 +1875,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1878,12 +1920,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1922,12 +1965,13 @@ , i32, , + i32, i32); define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8( %0, 
%1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 
+2684,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ 
-688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, 
mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 
@@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, 
%2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ 
-216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: 
vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i32, , + i32, i32); 
define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + 
i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1790,12 +1830,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1834,12 +1875,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1878,12 +1920,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1922,12 +1965,13 @@ , i32, , + i32, i32); define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; 
CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, 
m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define 
@intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: 
vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 
+2428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define 
@intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,12 +974,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -997,12 +1019,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i32 
%2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1041,12 +1064,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1085,12 +1109,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1129,12 +1154,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1349,12 +1379,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; 
CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1394,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1393,12 +1424,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1407,7 +1439,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1437,12 +1469,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1451,7 +1484,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1481,12 +1514,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1495,7 +1529,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1525,12 +1559,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1539,7 +1574,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1569,12 +1604,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1583,7 +1619,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1613,12 +1649,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1627,7 +1664,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1657,12 +1694,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1671,7 +1709,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1701,12 +1739,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1715,7 +1754,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1745,12 +1784,13 @@ , i32, , + i32, i32); define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1759,7 +1799,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1782,7 +1822,7 @@ define @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1791,7 +1831,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1814,7 +1854,7 @@ define @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1863,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1846,7 +1886,7 @@ define @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1855,7 +1895,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1878,7 +1918,7 @@ define @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1887,7 +1927,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1910,7 +1950,7 @@ define @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1919,7 +1959,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1942,7 +1982,7 @@ define @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1951,7 +1991,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1974,7 +2014,7 @@ define @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; 
CHECK-NEXT: ret entry: @@ -1983,7 +2023,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2006,7 +2046,7 @@ define @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2015,7 +2055,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2038,7 +2078,7 @@ define @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2047,7 +2087,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2070,7 +2110,7 @@ define @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2079,7 +2119,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2102,7 +2142,7 @@ define @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2111,7 +2151,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2134,7 +2174,7 @@ define @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2143,7 +2183,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2166,7 +2206,7 @@ define @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2175,7 +2215,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2198,7 +2238,7 @@ define @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2207,7 +2247,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2230,7 +2270,7 @@ define @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2239,7 +2279,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2262,7 
+2302,7 @@ define @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2271,7 +2311,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2294,7 +2334,7 @@ define @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2303,7 +2343,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2326,7 +2366,7 @@ define @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2335,7 +2375,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 
%4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: 
vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i64 
9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e8, mf4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } 
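The same mechanical update repeats across every hunk in these test files: each masked vssra/vssrl intrinsic declaration gains a trailing XLen-sized policy operand, the tests pass 1 (tail agnostic) for it, and the expected vsetvli flips from "tu, mu" to "ta, mu". A minimal sketch of the updated call shape follows; the exact type-mangled intrinsic suffix and the value names (%passthru, %src, %shift, %mask, @sketch) are illustrative assumptions, not copied from the patch.

  ; Assumed mangling for illustration only.
  declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,   ; maskedoff / passthru
    <vscale x 1 x i8>,   ; source vector
    <vscale x 1 x i8>,   ; per-element shift amounts
    <vscale x 1 x i1>,   ; mask
    i32,                 ; vl
    i32)                 ; new policy immediate, 1 = tail agnostic

  define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %src,
                                   <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask,
                                   i32 %vl) {
  entry:
    ; Same operands as before, plus the constant policy operand appended after vl.
    %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
        <vscale x 1 x i8> %passthru, <vscale x 1 x i8> %src, <vscale x 1 x i8> %shift,
        <vscale x 1 x i1> %mask, i32 %vl, i32 1)
    ret <vscale x 1 x i8> %a
  }
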
@@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,12 +974,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -997,12 +1019,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1041,12 +1064,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1085,12 +1109,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: 
ret entry: @@ -1099,7 +1124,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1129,12 +1154,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1349,12 +1379,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1363,7 +1394,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1393,12 +1424,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1407,7 +1439,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1437,12 +1469,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1451,7 +1484,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1481,12 +1514,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1495,7 +1529,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1525,12 +1559,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1539,7 +1574,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1569,12 +1604,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1583,7 +1619,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1613,12 +1649,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1627,7 +1664,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1657,12 +1694,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1671,7 +1709,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1701,12 +1739,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1715,7 +1754,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1745,12 +1784,13 @@ , i32, , + i32, i32); define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1759,7 +1799,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1782,7 +1822,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1791,7 +1831,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1814,7 +1854,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1823,7 +1863,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1846,7 +1886,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1855,7 +1895,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1878,7 +1918,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1887,7 +1927,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1910,7 +1950,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1919,7 +1959,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1942,7 +1982,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1951,7 +1991,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1974,7 +2014,7 @@ define @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1983,7 +2023,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2006,7 +2046,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2015,7 +2055,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2038,7 +2078,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; 
CHECK-NEXT: ret entry: @@ -2047,7 +2087,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2070,7 +2110,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2079,7 +2119,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2102,7 +2142,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2111,7 +2151,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2134,7 +2174,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2143,7 +2183,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2166,7 +2206,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2175,7 +2215,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2198,7 +2238,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2207,7 +2247,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2230,7 +2270,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2239,7 +2279,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2262,7 +2302,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2271,7 +2311,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2294,7 +2334,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2303,7 +2343,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2326,7 +2366,7 
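; Editorial sketch of one updated `.vi` call, to make the argument order explicit. The
; mangled intrinsic name below is hypothetical (the surrounding hunks elide the real
; declaration names), but the operand list matches the additions above: the immediate
; shift of 9 is unchanged and the new constant policy operand is appended after vl.
;
;   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.<hypothetical mangling>(
;            <vscale x 2 x i16> %0,    ; maskedoff (merge source)
;            <vscale x 2 x i16> %1,    ; shifted operand
;            i32 9,                    ; shift amount (immediate form)
;            <vscale x 2 x i1> %2,     ; mask
;            i32 %3,                   ; vl
;            i32 1)                    ; policy: 1 = tail agnostic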
@@ define @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2335,7 +2375,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v 
v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define 
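; For the m8 `.vv` tests just above (nxv64i8 here, and the nxv32i16 / nxv16i32 /
; nxv8i64 cases later in this file), the third vector operand does not fit in the
; vector argument registers, so it is passed indirectly and reloaded with a whole
; register-group load before the vsetvli. The expected sequence, taken from these
; hunks, is:
;
;   vl8r.v   v24, (a0)
;   vsetvli  zero, a1, e8, m8, ta, mu
;   vssrl.vv v8, v16, v24, v0.t
;
; Only the vsetvli line changes (tu -> ta); the load and the masked vssrl.vv are
; untouched.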
@intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; 
CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
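; The vssrl-rv64.ll hunks are the same transformation with XLen = 64: each masked
; declaration grows from `(..., mask, i64 vl)` to `(..., mask, i64 vl, i64 policy)` and
; each call site appends `i64 1`. Schematically, for a `.vx` variant (the intrinsic
; name is elided here exactly as in the surrounding hunks; the scalar shift amount is
; XLen-typed):
;
;   declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask...(
;       <vscale x 1 x i16>,    ; maskedoff
;       <vscale x 1 x i16>,    ; shifted operand
;       i64,                   ; scalar shift amount (.vx form, XLen-typed)
;       <vscale x 1 x i1>,     ; mask
;       i64,                   ; vl
;       i64)                   ; policy, passed as the constant 1 in these tests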
intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: 
@@ -2512,7 +2556,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( 
%0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; 
CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, 
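; A typing note on the vssub `.vx` hunks in this file: unlike vssrl, whose scalar shift
; operand is XLen-typed, the masked vssub declarations take the scalar in the element
; type itself -- i8/i16/i32 here, and i64 below (which on RV32 forces the stack splat
; shown after these hunks). The new trailing operand is still the XLen-typed policy,
; so the operand list has this shape (e.g. for nxv16i8):
;
;   (vector maskedoff, vector src, i8 scalar, mask, i32 vl, i32 policy)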
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw 
a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; 
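; The i64 `.vx` cases in vssub-rv32.ll (the hunks just above) cannot pass the scalar in
; a single GPR, so the test stores its two 32-bit halves to the stack and re-reads them
; with a zero-stride vlse64.v. The patch also changes the expected vsetvli sequence
; around that reload; for the nxv2i64 (m2) case above:
;
;   before:  vsetvli  zero, a2, e64, m2, ta, mu
;            addi     a0, sp, 8
;            vlse64.v v26, (a0), zero
;            vsetvli  zero, zero, e64, m2, tu, mu
;            vssub.vv v8, v10, v26, v0.t
;
;   after:   vsetivli zero, 1, e64, m2, ta, mu
;            addi     a0, sp, 8
;            vlse64.v v26, (a0), zero
;            vsetvli  zero, a2, e64, m2, ta, mu
;            vssub.vv v8, v10, v26, v0.t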
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define 
@intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, 
ta, mu ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define 
@intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v10, a0, 
v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define 
@intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a 
} @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ 
-1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: 
vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 
+899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, 
m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define 
@intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: 
vssubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define 
@intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, 
v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } 
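; ---------------------------------------------------------------------------
; Illustrative sketch, not part of the patch: every masked-intrinsic test in
; these hunks follows the same pattern. The intrinsic gains one trailing
; XLen-sized immediate policy operand, the tests pass 1 for it (tail
; agnostic), and the expected vsetvli therefore changes from "tu, mu" to
; "ta, mu". Assuming the usual RVV intrinsic name mangling (the exact suffix
; order is a convention, not taken from this diff) and with hypothetical
; value names, an updated vx-form declaration/call pair would look like:
;
;   declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(
;     <vscale x 2 x i32>,   ; maskedoff (merge) operand
;     <vscale x 2 x i32>,   ; vector source
;     i32,                  ; scalar source (.vx form)
;     <vscale x 2 x i1>,    ; mask
;     i32,                  ; vl
;     i32)                  ; new policy operand (must be an immediate)
;
;   %r = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(
;     <vscale x 2 x i32> %maskedoff,
;     <vscale x 2 x i32> %src,
;     i32 %scalar,
;     <vscale x 2 x i1> %mask,
;     i32 %vl,
;     i32 1)                ; 1 = tail agnostic
;
; With a tail-agnostic policy the elements past vl need not be preserved, so
; the codegen no longer has to request "tu" in the vsetvli, which is exactly
; the CHECK-line change repeated throughout this file.
; ---------------------------------------------------------------------------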
@@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 
+2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2055,7 @@ define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2064,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2087,7 @@ define @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2096,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2119,7 @@ define @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2128,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2151,7 @@ define @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2160,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2183,7 @@ define @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2192,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2215,7 @@ define @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2224,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2247,7 @@ define @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2212,7 +2256,7 @@ %1, i8 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2235,7 +2279,7 @@ define @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, 
i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2288,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2311,7 @@ define @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2320,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2343,7 @@ define @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2352,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2375,7 @@ define @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2384,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2407,7 @@ define @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2416,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2439,7 @@ define @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2448,7 @@ %1, i16 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2471,7 @@ define @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2480,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2503,7 @@ define @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2512,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2535,7 @@ define @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2544,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2567,7 @@ define @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2576,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2599,7 @@ define @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2608,7 @@ %1, i32 -9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2587,7 +2631,7 @@ define @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2640,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2663,7 @@ define @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2672,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2695,7 @@ define @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2704,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2727,7 @@ define @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2736,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define 
@intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret 
entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 
+1830,13 @@ , i64, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define 
@intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: 
vadd.vi v8, v12, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define 
@intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ 
-626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, 
mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; 
CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwadd.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwadd.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwadd.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwadd.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwadd.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, 
i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define 
@intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i32, i32); define @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - 
i32 %4) + i32 %4, i32 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ 
%0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: 
@@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: 
@@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + 
i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i64, i64); define 
@intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i64, i64); define @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wv 
v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; 
CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwadd.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define 
@intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v16, v20, v0.t ; CHECK-NEXT: 
ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 
%4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ 
, i8, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; 
CHECK-NEXT: vwaddu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + 
i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , i8, , 
+ i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i32, i32); define @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1531,7 +1561,7 @@ define 
@intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ 
-1667,7 +1697,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 
%1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( 
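Note on the recurring pattern in the vwaddu.w test updates above: every masked declaration gains one extra XLEN-sized trailing operand, every call site passes a constant 1 for it, and the expected vsetvli flips from tu to ta. Because the vscale vector types are not legible in this rendering of the diff, the sketch below reconstructs one RV32 variant from the test name intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8; the vector types and the exact intrinsic name mangling are inferred rather than copied, so treat them as approximate.

; Approximate reconstruction -- intrinsic mangling and vector types inferred from the test name.
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,   ; maskedoff
  <vscale x 1 x i16>,   ; wide source
  <vscale x 1 x i8>,    ; narrow source
  <vscale x 1 x i1>,    ; mask
  i32,                  ; vl
  i32)                  ; new trailing policy operand

%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
         <vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2,
         <vscale x 1 x i1> %3, i32 %4, i32 1)   ; 1 = tail agnostic, matching the tu -> ta CHECK updates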
%0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; 
CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
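The RV64 file applies the same change with i64 vl and policy operands; only the expected vtype in the CHECK lines moves. A representative before/after of the assembly expectation, taken from the nxv2i64 widening-add hunk above (register and LMUL choices vary per test):

; before the change
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t
; after the change
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vwaddu.wv v8, v10, v12, v0.t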
intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, 
i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i64, i64); define @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; 
CHECK-NEXT: vwaddu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, 
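The _tie_ variants above cover the case where the destination is tied to the wide source operand, so the same register group appears as both destination and first source in the expected code; the policy-operand change is identical, and the expected sequence still switches to tail agnostic, for example:

; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vwaddu.wv v8, v8, v9, v0.t      ; destination tied to the first source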
mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwaddu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ 
-311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define 
@intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - 
i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define 
@intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: 
ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define 
@intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) 
ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret 
%a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vwmulu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + 
i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; 
CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define 
@intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define 
@intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ 
%1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, m2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 
1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsub.vv v8, 
v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsub.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsub.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsub.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsub.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsub.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define 
@intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i32, i32); define @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1378,7 +1408,7 @@ define 
@intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } 
@@ -1514,7 +1544,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } 
@@ -1650,7 +1680,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i32 %3) + i32 
%3, i32 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ 
-526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i64, i64); define 
@intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i64, i64); define @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i64 %3) + i64 
%3, i64 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 
+1570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 
@@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret 
entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsub.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll @@ -27,12 +27,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -72,12 +73,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -117,12 +119,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -162,12 +165,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -207,12 +211,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -252,12 +257,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -297,12 +303,13 @@ , , , + i32, i32); define 
@intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -342,12 +349,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -387,12 +395,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -432,12 +441,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -477,12 +487,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -522,12 +533,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -567,12 +579,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -612,12 +625,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v12, v14, v0.t ; 
CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -657,12 +671,13 @@ , , , + i32, i32); define @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i32, i32); define 
@intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll @@ -27,12 +27,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -41,7 +42,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -72,12 +73,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -86,7 +88,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -117,12 +119,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -131,7 +134,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -162,12 +165,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -176,7 +180,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -207,12 +211,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +226,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -252,12 +257,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -266,7 +272,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -297,12 +303,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -311,7 +318,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -342,12 +349,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -356,7 +364,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -387,12 +395,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -401,7 +410,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -432,12 +441,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -446,7 +456,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -477,12 +487,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -491,7 +502,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -522,12 +533,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -536,7 +548,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -567,12 +579,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v10, v11, v0.t ; CHECK-NEXT: ret entry: @@ -581,7 +594,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -612,12 +625,13 @@ , , , + i64, i64); define @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v12, v14, v0.t ; CHECK-NEXT: ret entry: @@ -626,7 +640,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -657,12 +671,13 @@ , , , + i64, i64); define 
@intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.vv v8, v16, v20, v0.t ; CHECK-NEXT: ret entry: @@ -671,7 +686,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -702,12 +717,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -716,7 +732,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -747,12 +763,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +778,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -792,12 +809,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -806,7 +824,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -837,12 +855,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +870,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -882,12 +901,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -896,7 +916,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -927,12 +947,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -941,7 +962,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -972,12 +993,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -986,7 +1008,7 @@ 
%1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1017,12 +1039,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1031,7 +1054,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1062,12 +1085,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1100,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1107,12 +1131,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1121,7 +1146,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1152,12 +1177,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1166,7 +1192,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1197,12 +1223,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1211,7 +1238,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1242,12 +1269,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1256,7 +1284,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1287,12 +1315,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1330,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1332,12 +1361,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1376,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,13 +251,14 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -291,12 +297,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,13 +477,14 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -512,12 +523,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -556,12 +568,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,13 +658,14 @@ , , , + i32, i32); define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 
+989,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i32, i32); define @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, 
i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1582,7 +1612,7 @@ define 
@intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ 
-1718,7 +1748,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,13 +251,14 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4r.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -261,7 +267,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -291,12 +297,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , 
+ i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,13 +477,14 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -482,7 +493,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -512,12 +523,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -526,7 +538,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -556,12 +568,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,13 +658,14 @@ , , , + i64, i64); define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re32.v v28, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: ret entry: @@ -659,7 +674,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -689,12 +704,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +719,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -733,12 +749,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -747,7 +764,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -777,12 +794,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , i8, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,12 +974,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -967,7 +989,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -997,12 +1019,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1011,7 +1034,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1041,12 +1064,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, 
i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1055,7 +1079,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1085,12 +1109,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1099,7 +1124,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1129,12 +1154,13 @@ , i16, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1169,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1173,12 +1199,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1214,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1217,12 +1244,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1231,7 +1259,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1261,12 +1289,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1275,7 +1304,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1305,12 +1334,13 @@ , i32, , + i64, i64); define @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1319,7 +1349,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1327,7 +1357,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret 
entry: @@ -1336,7 +1366,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1344,7 +1374,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1353,7 +1383,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1361,7 +1391,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1370,7 +1400,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1378,7 +1408,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1387,7 +1417,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1395,7 +1425,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1404,7 +1434,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1412,7 +1442,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1421,7 +1451,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1429,7 +1459,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1468,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1446,7 +1476,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1455,7 +1485,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1463,7 +1493,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: 
vwsubu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1502,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1480,7 +1510,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1489,7 +1519,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1497,7 +1527,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1506,7 +1536,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1514,7 +1544,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1553,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1531,7 +1561,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1570,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1548,7 +1578,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1557,7 +1587,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1565,7 +1595,7 @@ define @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wv v8, v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1574,7 +1604,7 @@ %0, %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1582,7 +1612,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1621,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1599,7 +1629,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1608,7 +1638,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1616,7 +1646,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1625,7 +1655,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1633,7 +1663,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1642,7 +1672,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1650,7 +1680,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1659,7 +1689,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1667,7 +1697,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1676,7 +1706,7 @@ %0, i8 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1684,7 +1714,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1693,7 +1723,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1701,7 +1731,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1710,7 +1740,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1718,7 +1748,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1727,7 +1757,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1735,7 +1765,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1744,7 +1774,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1752,7 +1782,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1761,7 +1791,7 @@ %0, i16 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1769,7 +1799,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1778,7 +1808,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1786,7 +1816,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1795,7 +1825,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1803,7 +1833,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1812,7 +1842,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1820,7 +1850,7 @@ define @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vwsubu.wx v8, v8, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1859,7 @@ %0, i32 %1, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll @@ -26,12 +26,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -70,12 +71,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -114,12 +116,13 @@ , , , + i32, 
i32); define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -158,12 +161,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -202,12 +206,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -246,12 +251,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -290,13 +296,14 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -335,12 +342,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -379,12 +387,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -423,12 +432,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 
+447,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -467,12 +477,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -511,12 +522,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -555,13 +567,14 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +583,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -600,12 +613,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -644,12 +658,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -688,12 +703,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -732,12 +748,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -776,13 +793,14 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; 
CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -821,12 +839,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -865,12 +884,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -909,12 +929,13 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -953,13 +974,14 @@ , , , + i32, i32); define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1482,12 +1515,13 @@ , 
i16, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1614,12 +1650,13 @@ , i32, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1796,6 +1836,7 @@ , i64, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1804,10 +1845,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; 
CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v25, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v25, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1817,7 +1858,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1853,6 +1894,7 @@ , i64, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1861,10 +1903,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v26, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1874,7 +1916,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1910,6 +1952,7 @@ , i64, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1918,10 +1961,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v28, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v28, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1931,7 +1974,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -1967,6 +2010,7 @@ , i64, , + i32, i32); define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { @@ -1975,10 +2019,10 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1988,7 +2032,7 @@ %1, i64 %2, %3, - i32 %4) + i32 %4, i32 1) ret %a } @@ -2011,7 +2055,7 @@ define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2020,7 +2064,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2043,7 +2087,7 @@ define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2052,7 +2096,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2075,7 +2119,7 @@ define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2084,7 +2128,7 @@ 
%1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2107,7 +2151,7 @@ define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2160,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2139,7 +2183,7 @@ define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2148,7 +2192,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2171,7 +2215,7 @@ define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2180,7 +2224,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2203,7 +2247,7 @@ define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2212,7 +2256,7 @@ %1, i8 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2235,7 +2279,7 @@ define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2244,7 +2288,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2267,7 +2311,7 @@ define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2276,7 +2320,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2299,7 +2343,7 @@ define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2308,7 +2352,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2331,7 +2375,7 @@ define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2340,7 +2384,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2363,7 +2407,7 @@ define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2372,7 +2416,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2395,7 +2439,7 @@ define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2448,7 @@ %1, i16 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2427,7 +2471,7 @@ define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2436,7 +2480,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2459,7 +2503,7 @@ define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2468,7 +2512,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2491,7 +2535,7 @@ define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2500,7 +2544,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2523,7 +2567,7 @@ define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2532,7 +2576,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2555,7 +2599,7 @@ define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2564,7 +2608,7 @@ %1, i32 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2587,7 +2631,7 @@ define @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2596,7 +2640,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2619,7 +2663,7 @@ define @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2628,7 +2672,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2651,7 +2695,7 @@ define @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2704,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } @@ -2683,7 +2727,7 @@ define @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2692,7 +2736,7 @@ %1, i64 9, %2, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll @@ -26,12 +26,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -40,7 +41,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -70,12 +71,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -84,7 +86,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -114,12 +116,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -128,7 +131,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -158,12 +161,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -172,7 +176,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -202,12 +206,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -216,7 +221,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -246,12 +251,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( 
%0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -260,7 +266,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -290,13 +296,14 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +312,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -335,12 +342,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +357,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -379,12 +387,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -393,7 +402,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -423,12 +432,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -437,7 +447,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -467,12 +477,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -481,7 +492,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -511,12 +522,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -525,7 +537,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -555,13 +567,14 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 
+583,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -600,12 +613,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -614,7 +628,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -644,12 +658,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -658,7 +673,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -688,12 +703,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +718,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -732,12 +748,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +763,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -776,13 +793,14 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -791,7 +809,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -821,12 +839,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -835,7 +854,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -865,12 +884,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t ; CHECK-NEXT: ret entry: @@ -879,7 +899,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -909,12 +929,13 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t ; CHECK-NEXT: ret entry: @@ -923,7 +944,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -953,13 +974,14 @@ , , , + i64, i64); define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: ret entry: @@ -968,7 +990,7 @@ %1, %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -998,12 +1020,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1012,7 +1035,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1042,12 +1065,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1056,7 +1080,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1086,12 +1110,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1100,7 +1125,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1130,12 +1155,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1170,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1174,12 +1200,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1188,7 +1215,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1218,12 +1245,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1232,7 +1260,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1262,12 +1290,13 @@ , i8, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1276,7 +1305,7 @@ %1, i8 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1306,12 +1335,13 @@ , i16, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1320,7 +1350,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1350,12 +1380,13 @@ , i16, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1364,7 +1395,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1394,12 +1425,13 @@ , i16, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1408,7 +1440,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1438,12 +1470,13 @@ , i16, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1452,7 +1485,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1482,12 +1515,13 @@ , i16, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1496,7 +1530,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1526,12 +1560,13 @@ , i16, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1540,7 +1575,7 @@ %1, i16 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1570,12 +1605,13 @@ , i32, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1584,7 +1620,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1614,12 +1650,13 
@@ , i32, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1628,7 +1665,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1658,12 +1695,13 @@ , i32, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1710,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1702,12 +1740,13 @@ , i32, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1716,7 +1755,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1746,12 +1785,13 @@ , i32, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1800,7 @@ %1, i32 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1790,12 +1830,13 @@ , i64, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1804,7 +1845,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1834,12 +1875,13 @@ , i64, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1848,7 +1890,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1878,12 +1920,13 @@ , i64, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: @@ -1892,7 +1935,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1922,12 +1965,13 @@ , i64, , + i64, i64); define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v16, 
a0, v0.t ; CHECK-NEXT: ret entry: @@ -1936,7 +1980,7 @@ %1, i64 %2, %3, - i64 %4) + i64 %4, i64 1) ret %a } @@ -1959,7 +2003,7 @@ define @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -1968,7 +2012,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1991,7 +2035,7 @@ define @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2000,7 +2044,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2023,7 +2067,7 @@ define @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2032,7 +2076,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2055,7 +2099,7 @@ define @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2108,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2087,7 +2131,7 @@ define @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2096,7 +2140,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2119,7 +2163,7 @@ define @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2172,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2151,7 +2195,7 @@ define @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2160,7 +2204,7 @@ %1, i8 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2183,7 +2227,7 @@ define @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2192,7 +2236,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2215,7 +2259,7 @@ define @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, 
i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2224,7 +2268,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2247,7 +2291,7 @@ define @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2300,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2279,7 +2323,7 @@ define @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2288,7 +2332,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2311,7 +2355,7 @@ define @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2320,7 +2364,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2343,7 +2387,7 @@ define @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2352,7 +2396,7 @@ %1, i16 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2375,7 +2419,7 @@ define @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2384,7 +2428,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2407,7 +2451,7 @@ define @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2416,7 +2460,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2439,7 +2483,7 @@ define @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2448,7 +2492,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2471,7 +2515,7 @@ define @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2480,7 +2524,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2503,7 +2547,7 @@ define @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2512,7 +2556,7 @@ %1, i32 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2535,7 +2579,7 @@ define @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2544,7 +2588,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2567,7 +2611,7 @@ define @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2576,7 +2620,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2599,7 +2643,7 @@ define @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2608,7 +2652,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } @@ -2631,7 +2675,7 @@ define @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2684,7 @@ %1, i64 9, %2, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll @@ -24,12 +24,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -65,12 +66,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf8_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -106,12 +108,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf8_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -147,12 +150,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf8_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -188,12 +192,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -229,12 +234,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -270,12 +276,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -311,12 +318,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -352,12 +360,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -393,12 +402,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -434,12 +444,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %1, %2, %0, - i32 %3) + 
i32 %3, i32 1) ret %a } @@ -475,12 +486,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -516,12 +528,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf4_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -557,12 +570,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -598,12 +612,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -639,12 +654,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -652,7 +668,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -680,12 +696,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -693,7 +710,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -721,12 +738,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -734,7 +752,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -762,12 +780,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -775,7 +794,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -803,12 +822,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -816,7 +836,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -844,12 +864,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -857,7 +878,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -885,12 +906,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -898,7 +920,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -926,12 +948,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +962,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -967,12 +990,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -980,7 +1004,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1008,12 +1032,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -1021,7 +1046,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1049,12 +1074,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1062,7 +1088,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1090,12 +1116,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1103,7 +1130,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } @@ -1131,12 +1158,13 @@ , , , + i32, i32); define @intrinsic_vzext_mask_vf2_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ 
-1144,7 +1172,7 @@ %1, %2, %0, - i32 %3) + i32 %3, i32 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll @@ -24,12 +24,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -37,7 +38,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -65,12 +66,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf8_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -78,7 +80,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -106,12 +108,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf8_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -119,7 +122,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -147,12 +150,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf8_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -160,7 +164,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -188,12 +192,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -201,7 +206,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -229,12 +234,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +248,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -270,12 +276,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -283,7 +290,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -311,12 +318,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -324,7 +332,7 
@@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -352,12 +360,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -365,7 +374,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -393,12 +402,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -406,7 +416,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -434,12 +444,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -447,7 +458,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -475,12 +486,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -488,7 +500,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -516,12 +528,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf4_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -529,7 +542,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -557,12 +570,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -570,7 +584,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -598,12 +612,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +626,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -639,12 +654,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -652,7 +668,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -680,12 +696,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vzext_mask_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -693,7 +710,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -721,12 +738,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -734,7 +752,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -762,12 +780,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -775,7 +794,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -803,12 +822,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -816,7 +836,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -844,12 +864,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -857,7 +878,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -885,12 +906,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -898,7 +920,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -926,12 +948,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -939,7 +962,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -967,12 +990,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t ; CHECK-NEXT: ret entry: @@ -980,7 +1004,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1008,12 +1032,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9, v0.t 
; CHECK-NEXT: ret entry: @@ -1021,7 +1046,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1049,12 +1074,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10, v0.t ; CHECK-NEXT: ret entry: @@ -1062,7 +1088,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1090,12 +1116,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12, v0.t ; CHECK-NEXT: ret entry: @@ -1103,7 +1130,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } @@ -1131,12 +1158,13 @@ , , , + i64, i64); define @intrinsic_vzext_mask_vf2_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16, v0.t ; CHECK-NEXT: ret entry: @@ -1144,7 +1172,7 @@ %1, %2, %0, - i64 %3) + i64 %3, i64 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -6,7 +6,7 @@ ; for these intrinsics. declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i64) -declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64) +declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64, i64) define @test_vlseg2_mask_nxv16i16(i16* %base, %mask) { ; CHECK-LABEL: test_vlseg2_mask_nxv16i16: @@ -14,20 +14,19 @@ ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 0) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 0) + %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 0, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64) -declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i64, , i64) +declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, %mask) { ; CHECK-LABEL: test_vlsseg2_mask_nxv16i16: @@ -35,19 +34,18 @@ ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 0) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i64 %offset, %mask, i64 0) + %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i64 %offset, %mask, i64 0, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} 
@llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64) define @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, %index, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: @@ -55,20 +53,19 @@ ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 0) + %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 0, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64) +declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64) define @test_vluxseg2_mask_nxv16i16_nxv16i16(i16* %base, %index, %mask) { ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: @@ -76,20 +73,19 @@ ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, mu ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0) %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 0) + %2 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 0, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) -declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv16i16(i16* %base, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: @@ -112,14 +108,14 @@ ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv4r.v v4, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 0) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 0, i64 1) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl