diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6283,8 +6283,9 @@
                       DAG.getUNDEF(M1VT), InitialValue, DAG.getConstant(0, DL, XLenVT));
   SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
-  SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, PassThru, Vec,
-                                  InitialValue, Mask, VL);
+  SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+  SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
+  SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, Ops);
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
                      DAG.getConstant(0, DL, XLenVT));
 }
@@ -8700,10 +8701,11 @@
       DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalarVT, DAG.getUNDEF(ScalarVT),
                   NewScalarV, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+  SDValue Ops[] = {Reduce.getOperand(0), Reduce.getOperand(1),
+                   NewScalarV, Reduce.getOperand(3),
+                   Reduce.getOperand(4), Reduce.getOperand(5)};
   SDValue NewReduce =
-      DAG.getNode(Reduce.getOpcode(), DL, Reduce.getValueType(),
-                  Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
-                  Reduce.getOperand(3), Reduce.getOperand(4));
+      DAG.getNode(Reduce.getOpcode(), DL, Reduce.getValueType(), Ops);
   return DAG.getNode(Extract.getOpcode(), DL, Extract.getValueType(),
                      NewReduce, Extract.getOperand(1));
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1233,6 +1233,25 @@
   let UsesMaskPolicy = 1;
 }
 
+class VPseudoBinaryTailPolicy<VReg RetClass,
+                              RegisterClass Op1Class,
+                              DAGOperand Op2Class,
+                              string Constraint> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
+                    Op1Class:$rs2, Op2Class:$rs1,
+                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+}
+
 // Like VPseudoBinaryMask, but output can be V0.
 class VPseudoBinaryMOutMask<VReg RetClass,
                             RegisterClass Op1Class,
@@ -3052,6 +3071,20 @@
   }
 }
 
+multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
+                                        RegisterClass Op1Class,
+                                        DAGOperand Op2Class,
+                                        LMULInfo MInfo,
+                                        string Constraint = "",
+                                        bit Commutable = 0> {
+  let VLMul = MInfo.value in {
+    let isCommutable = Commutable in
+    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryTailPolicy<RetClass, Op1Class, Op2Class, Constraint>;
+  }
+}
+
+
 multiclass VPseudoTernaryWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
@@ -3307,7 +3340,7 @@
   foreach m = MxList in {
     defvar mx = m.MX;
    defvar WriteVIRedV_From_MX = !cast<SchedWrite>("WriteVIRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVIRedV_From_MX, ReadVIRedV, ReadVIRedV,
                      ReadVIRedV, ReadVMask]>;
   }
@@ -3317,7 +3350,7 @@
   foreach m = MxList in {
     defvar mx = m.MX;
    defvar WriteVIWRedV_From_MX = !cast<SchedWrite>("WriteVIWRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVIWRedV_From_MX, ReadVIWRedV, ReadVIWRedV,
                      ReadVIWRedV, ReadVMask]>;
   }
@@ -3327,7 +3360,7 @@
   foreach m = MxListF in {
     defvar mx = m.MX;
    defvar WriteVFRedV_From_MX = !cast<SchedWrite>("WriteVFRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFRedV_From_MX, ReadVFRedV, ReadVFRedV,
                      ReadVFRedV, ReadVMask]>;
   }
@@ -3337,7 +3370,7 @@
   foreach m = MxListF in {
     defvar mx = m.MX;
    defvar WriteVFRedOV_From_MX = !cast<SchedWrite>("WriteVFRedOV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFRedOV_From_MX, ReadVFRedOV, ReadVFRedOV,
                      ReadVFRedOV, ReadVMask]>;
   }
@@ -3347,7 +3380,7 @@
   foreach m = MxListF in {
     defvar mx = m.MX;
    defvar WriteVFWRedV_From_MX = !cast<SchedWrite>("WriteVFWRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
               Sched<[WriteVFWRedV_From_MX, ReadVFWRedV, ReadVFWRedV,
                      ReadVFWRedV, ReadVMask]>;
   }
@@ -4064,6 +4097,28 @@
                     op2_kind:$rs2,
                     GPR:$vl, sew)>;
 
+class VPatTernaryNoMaskTA<string intrinsic,
+                          string inst,
+                          string kind,
+                          ValueType result_type,
+                          ValueType op1_type,
+                          ValueType op2_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg result_reg_class,
+                          RegisterClass op1_reg_class,
+                          DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    VLOpFrag)),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                    result_reg_class:$rs3,
+                    (op1_type op1_reg_class:$rs1),
+                    op2_kind:$rs2,
+                    GPR:$vl, sew, TAIL_AGNOSTIC)>;
+
 class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                   string inst,
                                   string kind,
@@ -4136,6 +4191,31 @@
                    (mask_type V0),
                    GPR:$vl, sew, (XLenVT timm:$policy))>;
 
+class VPatTernaryMaskTA<string intrinsic,
+                        string inst,
+                        string kind,
+                        ValueType result_type,
+                        ValueType op1_type,
+                        ValueType op2_type,
+                        ValueType mask_type,
+                        int sew,
+                        LMULInfo vlmul,
+                        VReg result_reg_class,
+                        RegisterClass op1_reg_class,
+                        DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (mask_type V0),
+                    VLOpFrag)),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
+                    result_reg_class:$rs3,
+                    (op1_type op1_reg_class:$rs1),
+                    op2_kind:$rs2,
+                    (mask_type V0),
+                    GPR:$vl, sew, TAIL_AGNOSTIC)>;
+
 multiclass VPatUnaryS_M<string intrinsic_name,
                         string inst> {
@@ -4824,6 +4904,26 @@
                     op2_kind>;
 }
 
+multiclass VPatTernaryTA<string intrinsic,
+                         string inst,
+                         string kind,
+                         ValueType result_type,
+                         ValueType op1_type,
+                         ValueType op2_type,
+                         ValueType mask_type,
+                         int sew,
+                         LMULInfo vlmul,
+                         VReg result_reg_class,
+                         RegisterClass op1_reg_class,
+                         DAGOperand op2_kind> {
+  def : VPatTernaryNoMaskTA<intrinsic, inst, kind, result_type, op1_type,
+                            op2_type, sew, vlmul, result_reg_class,
+                            op1_reg_class, op2_kind>;
+  def : VPatTernaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
+                          op2_type, mask_type, sew, vlmul, result_reg_class,
+                          op1_reg_class, op2_kind>;
+}
+
 multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -4928,19 +5028,19 @@
   foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
     defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
-    defm : VPatTernary<intrinsic, instruction, "VS",
-                       vectorM1.Vector, vti.Vector,
-                       vectorM1.Vector, vti.Mask,
-                       vti.Log2SEW, vti.LMul,
-                       VR, vti.RegClass, VR>;
+    defm : VPatTernaryTA<intrinsic, instruction, "VS",
+                         vectorM1.Vector, vti.Vector,
+                         vectorM1.Vector, vti.Mask,
+                         vti.Log2SEW, vti.LMul,
+                         VR, vti.RegClass, VR>;
   }
   foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
-    defm : VPatTernary<intrinsic, instruction, "VS",
-                       gvti.VectorM1, gvti.Vector,
-                       gvti.VectorM1, gvti.Mask,
-                       gvti.Log2SEW, gvti.LMul,
-                       VR, gvti.RegClass, VR>;
+    defm : VPatTernaryTA<intrinsic, instruction, "VS",
+                         gvti.VectorM1, gvti.Vector,
+                         gvti.VectorM1, gvti.Mask,
+                         gvti.Log2SEW, gvti.LMul,
+                         VR, gvti.RegClass, VR>;
   }
 }
@@ -4950,12 +5050,12 @@
     defvar wtiSEW = !mul(vti.SEW, 2);
     if !le(wtiSEW, 64) then {
       defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
-      defm : VPatTernary<intrinsic, instruction, "VS",
-                         wtiM1.Vector, vti.Vector,
-                         wtiM1.Vector, vti.Mask,
-                         vti.Log2SEW, vti.LMul,
-                         wtiM1.RegClass, vti.RegClass,
-                         wtiM1.RegClass>;
+      defm : VPatTernaryTA<intrinsic, instruction, "VS",
+                           wtiM1.Vector, vti.Vector,
+                           wtiM1.Vector, vti.Mask,
+                           vti.Log2SEW, vti.LMul,
+                           wtiM1.RegClass, vti.RegClass,
+                           wtiM1.RegClass>;
     }
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -325,9 +325,10 @@
 def riscv_vwsub_w_vl  : SDNode<"RISCVISD::VWSUB_W_VL",  SDT_RISCVVWBinOpW_VL>;
 def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWBinOpW_VL>;
 
-def SDTRVVVecReduce : SDTypeProfile<1, 5, [
+def SDTRVVVecReduce : SDTypeProfile<1, 6, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
-  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>
+  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>,
+  SDTCisVT<6, XLenVT>
 ]>;
 
 def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
@@ -962,25 +963,29 @@
 multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
   foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
     defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
-    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
-                                 (vti.Mask true_mask),
-                                 VLOpFrag)),
+    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
+                                 (vti.Vector vti.RegClass:$rs1), VR:$rs2,
+                                 (vti.Mask true_mask), VLOpFrag,
+                                 (XLenVT timm:$policy))),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
                 (vti_m1.Vector VR:$merge),
                 (vti.Vector vti.RegClass:$rs1),
                 (vti_m1.Vector VR:$rs2),
-                GPR:$vl, vti.Log2SEW)>;
+                GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
 
-    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
-                                 (vti.Mask V0), VLOpFrag)),
+    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
+                                 (vti.Vector vti.RegClass:$rs1), VR:$rs2,
+                                 (vti.Mask V0), VLOpFrag,
+                                 (XLenVT timm:$policy))),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
                 (vti_m1.Vector VR:$merge),
                 (vti.Vector vti.RegClass:$rs1),
                 (vti_m1.Vector VR:$rs2),
-                (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+                (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
   }
 }
 
+
 multiclass VPatBinaryExtVL_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
   foreach vtiToWti = AllWidenableIntVectors in {
     defvar vti = vtiToWti.Vti;
@@ -1030,16 +1035,20 @@
     defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
-                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag,
+                                 (XLenVT timm:$policy))),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
                (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-               (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>;
+               (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
+               (XLenVT timm:$policy))>;
 
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
-                                 VR:$rs2, (vti.Mask V0), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask V0), VLOpFrag,
+                                 (XLenVT timm:$policy))),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
                (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-               (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+               (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+               (XLenVT timm:$policy))>;
   }
 }
@@ -1050,16 +1059,20 @@
     defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
-                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag,
+                                 (XLenVT timm:$policy))),
            (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
               (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-              (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>;
+              (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
+              (XLenVT timm:$policy))>;
 
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
-                                 VR:$rs2, (vti.Mask V0), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask V0), VLOpFrag,
+                                 (XLenVT timm:$policy))),
            (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
               (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-              (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+              (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+              (XLenVT timm:$policy))>;
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -24,7 +24,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -39,7 +39,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -52,7 +52,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -80,7 +80,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -95,7 +95,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -108,7 +108,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -131,13 +131,13 @@
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v25, fa0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    addi a1, a0, -32
 ; CHECK-NEXT:    sltu a0, a0, a1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfredusum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -159,13 +159,13 @@
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v25, fa0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    addi a1, a0, -32
 ; CHECK-NEXT:    sltu a0, a0, a1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfredosum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -181,7 +181,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -194,7 +194,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -222,7 +222,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -237,7 +237,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -27,7 +27,7 @@
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -42,7 +42,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -73,7 +73,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -88,7 +88,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -103,7 +103,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -149,7 +149,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -165,7 +165,7 @@
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -180,7 +180,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -196,7 +196,7 @@
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -241,7 +241,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -256,7 +256,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -288,7 +288,7 @@
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -299,7 +299,7 @@
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -314,7 +314,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -331,7 +331,7 @@
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -342,7 +342,7 @@
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -372,7 +372,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -387,7 +387,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -402,7 +402,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -417,7 +417,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -434,7 +434,7 @@
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -445,7 +445,7 @@
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -460,7 +460,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -477,7 +477,7 @@
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -488,7 +488,7 @@
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -503,7 +503,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -533,7 +533,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -548,7 +548,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -578,7 +578,7 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -589,7 +589,7 @@
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -604,7 +604,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -619,7 +619,7 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -630,7 +630,7 @@
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -645,7 +645,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -660,7 +660,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -675,7 +675,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -690,7 +690,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -720,7 +720,7 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -731,7 +731,7 @@
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -746,7 +746,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -761,7 +761,7 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -772,7 +772,7 @@
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -787,7 +787,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -802,7 +802,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -832,7 +832,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -855,7 +855,7 @@
 ; CHECK-NEXT:  .LBB49_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v25, a0
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -864,7 +864,7 @@
 ; CHECK-NEXT:    sltu a1, a1, a0
 ; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    and a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vredxor.vs v8, v16, v8, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -885,7 +885,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -899,7 +899,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -919,7 +919,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -933,7 +933,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -953,7 +953,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -967,7 +967,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -987,7 +987,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1001,7 +1001,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1021,7 +1021,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1035,7 +1035,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1055,7 +1055,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1069,7 +1069,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1089,7 +1089,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1103,7 +1103,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1123,7 +1123,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1137,7 +1137,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1157,7 +1157,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1171,7 +1171,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1191,7 +1191,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1205,7 +1205,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1225,7 +1225,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1239,7 +1239,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1259,7 +1259,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1273,7 +1273,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1293,7 +1293,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1307,7 +1307,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1327,7 +1327,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1341,7 +1341,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1361,7 +1361,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1375,7 +1375,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1395,7 +1395,7 @@
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1409,7 +1409,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
@@ -12,7 +12,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -518,7 +518,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -541,7 +541,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -564,7 +564,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -587,7 +587,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -610,7 +610,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -633,7 +633,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -656,7 +656,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -679,7 +679,7 @@
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
@@ -12,7 +12,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@
 define
<vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll @@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define <vscale x 2 x float> 
@intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll @@ -12,7 +12,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; 
CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 
x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; 
CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll @@ -12,7 +12,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define <vscale x 2 x float> 
@intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, 
m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, 
<vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll @@ -12,7 +12,7 @@ define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -127,7 +127,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -150,7 +150,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -173,7 +173,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -196,7 +196,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -219,7 +219,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -242,7 +242,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -265,7 +265,7 @@
define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -288,7 +288,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -311,7 +311,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -334,7 +334,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -357,7 +357,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -380,7 +380,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -403,7 +403,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -426,7 +426,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -449,7 +449,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -472,7 +472,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -495,7 +495,7 @@
define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -214,12 +214,12 @@
; CHECK: liveins: $x10, $v8, $v26, $v27
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
- ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v26 = VMV1R_V killed $v8
; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v8m8 = VL8RE8_V killed $x10
$x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
- $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype
+ $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, 1, implicit $vl, implicit $vtype
$v26 = COPY killed renamable $v8
$x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype
$v8m8 = VL8RE8_V killed $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2
x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x
i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define 
<vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; 
CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: 
 ; CHECK-NEXT: ret
 entry:
@@ -792,7 +792,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -815,7 +815,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -838,7 +838,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -861,7 +861,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -884,7 +884,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -907,7 +907,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -930,7 +930,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9
 ; CHECK-NEXT: ret
 entry:
@@ -953,7 +953,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -10,7 +10,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -33,7 +33,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -56,7 +56,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -79,7 +79,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -102,7 +102,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -125,7 +125,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -148,7 +148,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -171,7 +171,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -194,7 +194,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -217,7 +217,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -240,7 +240,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -263,7 +263,7 @@
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -286,7 +286,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -309,7 +309,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -332,7 +332,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -355,7 +355,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -378,7 +378,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -401,7 +401,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -424,7 +424,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -447,7 +447,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -470,7 +470,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -493,7 +493,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -516,7 +516,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9
 ; CHECK-NEXT: ret
 entry:
@@ -539,7 +539,7 @@
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -562,7 +562,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -585,7 +585,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -608,7 +608,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -631,7 +631,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -654,7 +654,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -677,7 +677,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -700,7 +700,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -723,7 +723,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -746,7 +746,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9
 ; CHECK-NEXT: ret
 entry:
@@ -769,7 +769,7 @@
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -792,7 +792,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -815,7 +815,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -838,7 +838,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -861,7 +861,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -884,7 +884,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -907,7 +907,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -930,7 +930,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9
 ; CHECK-NEXT: ret
 entry:
@@ -953,7 +953,7 @@
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -10,7 +10,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -33,7 +33,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -56,7 +56,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -79,7 +79,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -102,7 +102,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -125,7 +125,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -148,7 +148,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -171,7 +171,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -194,7 +194,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -217,7 +217,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -240,7 +240,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -263,7 +263,7 @@
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -286,7 +286,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -309,7 +309,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -332,7 +332,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -355,7 +355,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -378,7 +378,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -401,7 +401,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -424,7 +424,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -447,7 +447,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -470,7 +470,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -493,7 +493,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -516,7 +516,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v16, v9
 ; CHECK-NEXT: ret
 entry:
@@ -539,7 +539,7 @@
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -562,7 +562,7 @@
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -585,7 +585,7 @@
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -608,7 +608,7 @@
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -631,7 +631,7 @@
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -654,7 +654,7 @@
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
# %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll @@ -10,7 +10,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, 
v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ 
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -838,7 +838,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -861,7 +861,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -884,7 +884,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll @@ -10,7 +10,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ 
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ 
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: 
vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, 
e64, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -11,7 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -24,7 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -39,7 +39,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -80,7 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -105,13 +105,13 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB6_2: -; CHECK-NEXT: vsetvli zero, a2, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v25, v8, v25, v0.t ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: sltu a0, a0, a1 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: 
vfredusum.vs v25, v16, v25, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v25 @@ -135,13 +135,13 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB7_2: -; CHECK-NEXT: vsetvli zero, a2, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v25, v8, v25, v0.t ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: sltu a0, a0, a1 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredosum.vs v25, v16, v25, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v25 @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -170,7 +170,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -185,7 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -198,7 +198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -213,7 +213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -226,7 +226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -241,7 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -254,7 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -269,7 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -282,7 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret @@ -310,7 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret @@ -325,7 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -11,7 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -27,7 +27,7 @@ ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -73,7 +73,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -88,7 +88,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, 
e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -118,7 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -149,7 +149,7 @@ ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -164,7 +164,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -180,7 +180,7 @@ ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -195,7 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -225,7 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -240,7 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -255,7 +255,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -270,7 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s 
a0, v9 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -317,7 +317,7 @@ ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -332,7 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -347,7 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -362,7 +362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -392,7 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -409,7 +409,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -420,7 +420,7 @@ ; RV64-NEXT: srli a0, a0, 48 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -435,7 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -452,7 +452,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, 
e16, mf4, tu, ma +; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -463,7 +463,7 @@ ; RV64-NEXT: srli a0, a0, 48 ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -478,7 +478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -508,7 +508,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -555,7 +555,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -566,7 +566,7 @@ ; RV64-NEXT: srli a0, a0, 48 ; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -581,7 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -609,7 +609,7 @@ ; RV64-NEXT: srli a0, a0, 48 ; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -624,7 +624,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -639,7 +639,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -654,7 +654,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -669,7 +669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -701,7 +701,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -712,7 +712,7 @@ ; RV64-NEXT: srli a0, a0, 48 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -727,7 +727,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -744,7 +744,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -755,7 +755,7 @@ ; RV64-NEXT: srli a0, a0, 48 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -770,7 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; 
CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -800,7 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -815,7 +815,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -845,7 +845,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -856,7 +856,7 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -871,7 +871,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -886,7 +886,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -897,7 +897,7 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -912,7 +912,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -927,7 +927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -942,7 +942,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -957,7 +957,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -972,7 +972,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -987,7 +987,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -998,7 +998,7 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1013,7 +1013,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1028,7 +1028,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -1039,7 +1039,7 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1054,7 +1054,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1069,7 +1069,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1084,7 +1084,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1099,7 +1099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vredsum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1129,7 +1129,7 @@ ; RV32: # 
%bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: ret @@ -1140,7 +1140,7 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1168,12 +1168,12 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB67_2: -; RV32-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vredmaxu.vs v25, v8, v25, v0.t ; RV32-NEXT: vmv.x.s a0, v25 ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 -; RV32-NEXT: vsetvli zero, a2, e32, m8, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmv1r.v v0, v24 ; RV32-NEXT: vredmaxu.vs v8, v16, v8, v0.t ; RV32-NEXT: vmv.x.s a0, v8 @@ -1198,12 +1198,12 @@ ; RV64-NEXT: .LBB67_2: ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v25, a2 -; RV64-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vredmaxu.vs v25, v8, v25, v0.t ; RV64-NEXT: vmv.x.s a1, v25 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, a1 -; RV64-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vredmaxu.vs v8, v16, v8, v0.t ; RV64-NEXT: vmv.x.s a0, v8 @@ -1219,7 +1219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vredmax.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1234,7 +1234,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: ret @@ -1245,7 +1245,7 @@ ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1260,7 +1260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vredmin.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1275,7 +1275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vredand.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1290,7 +1290,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vredor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1305,7 +1305,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vredxor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1325,7 +1325,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1339,7 +1339,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1357,7 +1357,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 @@ -1372,7 +1372,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 @@ -1392,7 +1392,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 @@ -1407,7 +1407,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 @@ -1429,7 +1429,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1443,7 +1443,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1463,7 +1463,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1477,7 +1477,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1497,7 +1497,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; 
RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1511,7 +1511,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1531,7 +1531,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1545,7 +1545,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1565,7 +1565,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1579,7 +1579,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1599,7 +1599,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1613,7 +1613,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1633,7 +1633,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 @@ -1647,7 +1647,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1667,7 +1667,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1681,7 +1681,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, 
a1, e64, m2, ta, ma ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1699,7 +1699,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 @@ -1713,7 +1713,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 @@ -1733,7 +1733,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 @@ -1747,7 +1747,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 @@ -1769,7 +1769,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1783,7 +1783,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1803,7 +1803,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1817,7 +1817,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1837,7 +1837,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1851,7 +1851,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1871,7 +1871,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, 
e64, m2, ta, ma ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1885,7 +1885,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1905,7 +1905,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1919,7 +1919,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1939,7 +1939,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1953,7 +1953,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1973,7 +1973,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 @@ -1987,7 +1987,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -2007,7 +2007,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2021,7 +2021,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2039,7 +2039,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; RV32-NEXT: vwredsum.vs v10, v8, v10, v0.t ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v10 @@ -2053,7 +2053,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vwredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vsetivli zero, 0, e64, 
m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v10 @@ -2073,7 +2073,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, ma +; RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; RV32-NEXT: vwredsumu.vs v10, v8, v10, v0.t ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v10 @@ -2087,7 +2087,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vwredsumu.vs v10, v8, v10, v0.t ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v10 @@ -2109,7 +2109,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2123,7 +2123,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2143,7 +2143,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2157,7 +2157,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2177,7 +2177,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2191,7 +2191,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2211,7 +2211,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2225,7 +2225,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2245,7 +2245,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredand.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 
32 @@ -2259,7 +2259,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredand.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2279,7 +2279,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredor.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2293,7 +2293,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2313,7 +2313,7 @@ ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 @@ -2327,7 +2327,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll @@ -10,7 +10,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define <vscale x 8 x i8> 
@intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 
4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define <vscale x 4 x i16> 
@intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ 
-654,7 +654,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t 
; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll @@ -10,7 +10,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, 
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -781,7 +781,7 @@
; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
-; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, implicit $vl, implicit $vtype
+; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
; CHECK-NEXT: PseudoRET
@@ -815,7 +815,7 @@
%21:vr = IMPLICIT_DEF
%20:vr = PseudoVMV_S_X_M1 %21, %19, 1, 5
%24:vr = IMPLICIT_DEF
-%23:vr = PseudoVREDSUM_VS_M1 %24, %16, killed %20, 4, 5
+%23:vr = PseudoVREDSUM_VS_M1 %24, %16, killed %20, 4, 5, 1
PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -311,7 +311,7 @@
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, implicit $vl, implicit $vtype
+; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6 /* e64 */, implicit $vtype
; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
; CHECK-NEXT: PseudoRET implicit $x10
@@ -319,7 +319,7 @@
%1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
%2:vr = PseudoVMV_V_I_M1 0, -1, 6
%4:vr = IMPLICIT_DEF
-%3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
+%3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6, 1
%5:gpr = PseudoVMV_X_S_M1 killed %3, 6
$x10 = COPY %5
PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
@@ -10,7 +10,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
@@ -10,7 +10,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -401,7 +401,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -424,7 +424,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -447,7 +447,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -470,7 +470,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -493,7 +493,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -516,7 +516,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -539,7 +539,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -562,7 +562,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -585,7 +585,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -608,7 +608,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -631,7 +631,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -654,7 +654,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -677,7 +677,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -700,7 +700,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -723,7 +723,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -746,7 +746,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -769,7 +769,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -792,7 +792,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -815,7 +815,7 @@
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
@@ -10,7 +10,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -148,7 +148,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -171,7 +171,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -194,7 +194,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
@@ -217,7 +217,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -263,7 +263,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -286,7 +286,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -309,7 +309,7 @@
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -332,7 +332,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -355,7 +355,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -378,7 +378,7 @@
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: