diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1589,3 +1589,4 @@ // Vendor extensions //===----------------------------------------------------------------------===// include "llvm/IR/IntrinsicsRISCVXTHead.td" +include "llvm/IR/IntrinsicsRISCVXsf.td" diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td new file mode 100644 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td @@ -0,0 +1,123 @@ +class VCIXSuffix { + list suffix = !cond(!eq(range, "c"): ["e8mf8", "e8mf4", "e8mf2", "e8m1", "e8m2", "e8m4", "e8m8"], + !eq(range, "s"): ["e16mf4", "e16mf2", "e16m1", "e16m2", "e16m4", "e16m8"], + !eq(range, "i"): ["e32mf2", "e32m1", "e32m2", "e32m4", "e32m8"], + !eq(range, "l"): ["e64m1", "e64m2", "e64m4", "e64m8"]); +} + +let TargetPrefix = "riscv" in { + // Output: (vector_out) or () + // Input: (bit<27-26>, bit<24-20>, scalar_in, vl) or + // (bit<27-26>, bit<24-20>, bit<11-7>, scalar_in, vl) + class RISCVSFCustomVC_X + : Intrinsic], + [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>]), + [llvm_any_ty, llvm_anyint_ty]), + !listconcat([IntrNoMem, ImmArg>, ImmArg>], // bit<27-26> and bit<24-20> + !if(HasDst, [], [ImmArg>]), // Vd or bit<11-7> + !if(ImmScalar, !if(HasDst, [ImmArg>], + [ImmArg>]), []), // ScalarOperand + !if(HasSE, [IntrHasSideEffects], []))>, + RISCVVIntrinsic { + let ScalarOperand = !cond(ImmScalar: NoScalarOperand, + HasDst: 2, + true: 3); + let VLOperand = !if(HasDst, 3, 4); + } + // Output: (vector_out) or () + // Input: (bit<27-26>, vector_in, vector_in/scalar_in, vl) or + // (bit<27-26>, bit<11-7>, vector_in, vector_in/scalar_in, vl) + class RISCVSFCustomVC_XV + : Intrinsic], + [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty]), + [llvm_any_ty, llvm_anyint_ty]), + !listconcat([IntrNoMem, ImmArg>], // bit<27-26> + !if(HasDst, [], [ImmArg>]), // Vd or bit<11-7> + !if(ImmScalar, !if(HasDst, [ImmArg>], + [ImmArg>]), []), // ScalarOperand + !if(HasSE, [IntrHasSideEffects], []))>, + RISCVVIntrinsic { + let ScalarOperand = !cond(ImmScalar: NoScalarOperand, + HasDst: 2, + true: 3); + let VLOperand = !if(HasDst, 3, 4); + } + // Output: (vector_out) or () + // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or + // (bit<27-26>, vector_in, vector_in, vector_in/scalar_in, vl) + class RISCVSFCustomVC_XVV + : Intrinsic, LLVMMatchType<0>], + [llvm_anyint_ty, llvm_anyvector_ty, LLVMMatchType<1>]), + [llvm_any_ty, llvm_anyint_ty]), + !listconcat([IntrNoMem, ImmArg>], // bit<27-26> + !if(ImmScalar, [ImmArg>], []), // ScalarOperand + !if(HasSE, [IntrHasSideEffects], []))>, + RISCVVIntrinsic { + let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3); + let VLOperand = 4; + } + // Output: (wvector_out) or () + // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or + // (bit<27-26>, wvector_in, vector_in, vector_in/scalar_in, vl) + class RISCVSFCustomVC_XVW + : Intrinsic, llvm_anyvector_ty], + [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]), + [llvm_any_ty, llvm_anyint_ty]), + !listconcat([IntrNoMem, ImmArg>], // bit<27-26> + !if(ImmScalar, [ImmArg>], []), // ScalarOperand + !if(HasSE, [IntrHasSideEffects], []))>, + RISCVVIntrinsic { + let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3); + let VLOperand = 4; + } + + multiclass RISCVSFCustomVC_X type> { + foreach t = type in { + defvar ImmScalar = !eq(t, "i"); + defvar range = ["c", "s", "i", "l"]; 
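For reference, the operand layouts described in the class comments above correspond to IR calls of the following shape. This is an illustrative sketch that mirrors the tests added later in this patch; iXLen stands for i32 or i64 depending on XLEN, and the vector result type is the one implied by the nxv1i8 suffix.

; void form (sf.vc.x.se): funct6 bit<27-26>, bit<24-20>, bit<11-7>, scalar rs1, vl
tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
; value-returning form (sf.vc.v.x.se): the bit<11-7> (vd) operand is dropped and the result is returned instead
%0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)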
+ foreach r = range in { + foreach s = VCIXSuffix.suffix in { + def "int_riscv_sf_vc_" # t # "_se_" # s : RISCVSFCustomVC_X; + } + } + def "int_riscv_sf_vc_v_" # t # "_se" : RISCVSFCustomVC_X; + def "int_riscv_sf_vc_v_" # t : RISCVSFCustomVC_X; + } + } + + multiclass RISCVSFCustomVC_XV type> { + foreach t = type in { + defvar ImmScalar = !eq(t, "i"); + def "int_riscv_sf_vc_" # t # "v_se" : RISCVSFCustomVC_XV; + def "int_riscv_sf_vc_v_" # t # "v_se" : RISCVSFCustomVC_XV; + def "int_riscv_sf_vc_v_" # t # "v" : RISCVSFCustomVC_XV; + } + } + + multiclass RISCVSFCustomVC_XVV type> { + foreach t = type in { + defvar ImmScalar = !eq(t, "i"); + def "int_riscv_sf_vc_" # t # "vv_se" : RISCVSFCustomVC_XVV; + def "int_riscv_sf_vc_v_" # t # "vv_se" : RISCVSFCustomVC_XVV; + def "int_riscv_sf_vc_v_" # t # "vv" : RISCVSFCustomVC_XVV; + } + } + + multiclass RISCVSFCustomVC_XVW type> { + foreach t = type in { + defvar ImmScalar = !eq(t, "i"); + def "int_riscv_sf_vc_" # t # "vw_se" : RISCVSFCustomVC_XVW; + def "int_riscv_sf_vc_v_" # t # "vw_se" : RISCVSFCustomVC_XVW; + def "int_riscv_sf_vc_v_" # t # "vw" : RISCVSFCustomVC_XVW; + } + } + + defm "" : RISCVSFCustomVC_X<["x", "i"]>; + defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>; + defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>; + defm "" : RISCVSFCustomVC_XVW<["x", "i", "v", "f"]>; +} // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -508,10 +508,12 @@ // RVV intrinsics may have illegal operands. // We also need to custom legalize vmv.x.s. - setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN}, + setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, + ISD::INTRINSIC_VOID}, {MVT::i8, MVT::i16}, Custom); if (Subtarget.is64Bit()) - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom); + setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}, + MVT::i32, Custom); else setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN}, MVT::i64, Custom); @@ -5986,15 +5988,18 @@ // promoted or expanded. static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) { - assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || + assert((Op.getOpcode() == ISD::INTRINSIC_VOID || + Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) && "Unexpected opcode"); if (!Subtarget.hasVInstructions()) return SDValue(); - bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN; + bool HasChain = Op.getOpcode() == ISD::INTRINSIC_VOID || + Op.getOpcode() == ISD::INTRINSIC_W_CHAIN; unsigned IntNo = Op.getConstantOperandVal(HasChain ? 
1 : 0); + SDLoc DL(Op); const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = @@ -6463,7 +6468,7 @@ } } - return SDValue(); + return lowerVectorIntrinsicScalars(Op, DAG, Subtarget); } static unsigned getRVVReductionOp(unsigned ISDOpcode) { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -78,7 +78,8 @@ ["_M1", ""], ["_M2", ""], ["_M4", ""], - ["_M8", ""] + ["_M8", ""], + ["_SE", ""] ]; string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst, !subst(AffixSubst[0], AffixSubst[1], Acc)); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td @@ -27,6 +27,41 @@ def VCIX_XVV : VCIXType<0b1010>; def VCIX_XVW : VCIXType<0b1111>; +// The payload and timm5 operands are all marked as ImmArg in the IR +// intrinsic and will be target constant, so use TImmLeaf rather than ImmLeaf. +def payload1 : Operand, TImmLeaf(Imm);}]> { + let ParserMatchClass = UImmAsmOperand<1>; + let DecoderMethod = "decodeUImmOperand<1>"; + let OperandType = "OPERAND_UIMM1"; + let OperandNamespace = "RISCVOp"; +} + +def payload2 : Operand, TImmLeaf(Imm);}]> { + let ParserMatchClass = UImmAsmOperand<2>; + let DecoderMethod = "decodeUImmOperand<2>"; + let OperandType = "OPERAND_UIMM2"; + let OperandNamespace = "RISCVOp"; +} + +def payload5 : Operand, TImmLeaf(Imm);}]> { + let ParserMatchClass = UImmAsmOperand<5>; + let DecoderMethod = "decodeUImmOperand<5>"; + let OperandType = "OPERAND_UIMM5"; + let OperandNamespace = "RISCVOp"; +} + +def timm5 : Operand, TImmLeaf(Imm);}]> { + let ParserMatchClass = SImmAsmOperand<5>; + let EncoderMethod = "getImmOpValue"; + let DecoderMethod = "decodeSImmOperand<5>"; + let MCOperandPredicate = [{ + int64_t Imm; + if (MCOp.evaluateAsConstantImm(Imm)) + return isInt<5>(Imm); + return MCOp.isBareSymbolRef(); + }]; +} + class SwapVCIXIns { dag Ins = !con(funct6, !if(swap, rs2, rd), !if(swap, rd, rs2), rs1); } @@ -76,6 +111,15 @@ let RVVConstraint = NoConstraint; } +class GetFTypeInfo { + ValueType Scalar = !cond(!eq(sew, 16): f16, + !eq(sew, 32): f32, + !eq(sew, 64): f64); + RegisterClass ScalarRegClass = !cond(!eq(sew, 16): FPR16, + !eq(sew, 32): FPR32, + !eq(sew, 64): FPR64); +} + class VCIXInfo { string OpcodeStr = !if(HaveOutputDst, "sf.vc.v." 
# suffix, @@ -160,3 +204,303 @@ defm VVW : CustomSiFiveVCIX<"vvw", VCIX_XVW, VR, VR, VR>, Sched<[]>; defm FVW : CustomSiFiveVCIX<"fvw", VCIX_XVW, VR, VR, FPR32>, Sched<[]>; } + +class VPseudoVC_X : + Pseudo<(outs), + (ins OpClass:$op1, payload5:$rs2, payload5:$rd, RS1Class:$r1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let hasSideEffects = HasSideEffect; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoVC_XV : + Pseudo<(outs), + (ins OpClass:$op1, payload5:$rd, RS2Class:$rs2, RS1Class:$r1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let hasSideEffects = HasSideEffect; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoVC_XVV : + Pseudo<(outs), + (ins OpClass:$op1, RDClass:$rd, RS2Class:$rs2, RS1Class:$r1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let hasSideEffects = HasSideEffect; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoVC_V_X : + Pseudo<(outs RDClass:$rd), + (ins OpClass:$op1, payload5:$rs2, RS1Class:$r1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let hasSideEffects = HasSideEffect; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoVC_V_XV : + Pseudo<(outs RDClass:$rd), + (ins OpClass:$op1, RS2Class:$rs2, RS1Class:$r1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let hasSideEffects = HasSideEffect; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoVC_V_XVV : + Pseudo<(outs RDClass:$rd), + (ins OpClass:$op1, RDClass:$rs3, RS2Class:$rs2, RS1Class:$r1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let hasSideEffects = HasSideEffect; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +multiclass VPseudoVC_X { + let VLMul = m.value in { + def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_X; + def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_X; + def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_X; + } +} + +multiclass VPseudoVC_XV { + let VLMul = m.value in { + def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XV; + def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XV; + def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XV; + } +} + +multiclass VPseudoVC_XVV { + let VLMul = m.value in { + def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XVV; + def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XVV; + def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XVV; + } +} + +multiclass VPseudoVC_XVW { + let VLMul = m.value in { + def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XVV; + let Constraints = "@earlyclobber $rd, $rd = $rs3" in { + def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XVV; + def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XVV; + } + } +} + +let Predicates = [HasVendorXSfvcp] in { + foreach m = MxList in { + defm X : VPseudoVC_X; + defm I : VPseudoVC_X; + defm XV : VPseudoVC_XV; + defm IV : VPseudoVC_XV; + defm VV : VPseudoVC_XV; + defm XVV : VPseudoVC_XVV; + defm IVV : VPseudoVC_XVV; + defm VVV : VPseudoVC_XVV; + } + foreach f = FPList in { + foreach m = f.MxList in { + defm f.FX # "V" : VPseudoVC_XV; + defm f.FX # "VV" : VPseudoVC_XVV; + } + } + foreach m = 
MxListW in { + defm XVW : VPseudoVC_XVW; + defm IVW : VPseudoVC_XVW; + defm VVW : VPseudoVC_XVW; + } + foreach f = FPListW in { + foreach m = f.MxList in + defm f.FX # "VW" : VPseudoVC_XVW; + } +} + +class VPatVC_OP4 : + Pat<(!cast(intrinsic_name) + (XLenVT op1_kind:$op1), + (op2_type op2_kind:$op2), + (op3_type op3_kind:$op3), + (op4_type op4_kind:$op4), + VLOpFrag), + (!cast(inst) + (XLenVT op1_kind:$op1), + (op2_type op2_kind:$op2), + (op3_type op3_kind:$op3), + (op4_type op4_kind:$op4), + GPR:$vl, sew)>; + +class VPatVC_V_OP4 : + Pat<(result_type (!cast(intrinsic_name) + (XLenVT op1_kind:$op1), + (op2_type op2_kind:$op2), + (op3_type op3_kind:$op3), + (op4_type op4_kind:$op4), + VLOpFrag)), + (!cast(inst) + (XLenVT op1_kind:$op1), + (op2_type op2_kind:$op2), + (op3_type op3_kind:$op3), + (op4_type op4_kind:$op4), + GPR:$vl, sew)>; + +class VPatVC_V_OP3 : + Pat<(result_type (!cast(intrinsic_name) + (XLenVT op1_kind:$op1), + (op2_type op2_kind:$op2), + (op3_type op3_kind:$op3), + VLOpFrag)), + (!cast(inst) + (XLenVT op1_kind:$op1), + (op2_type op2_kind:$op2), + (op3_type op3_kind:$op3), + GPR:$vl, sew)>; + +multiclass VPatVC_X { + def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se_e" # vti.SEW # !tolower(vti.LMul.MX), + "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX, + XLenVT, XLenVT, type, vti.Log2SEW, + payload5, payload5, kind>; + def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se", + "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX, + vti.Vector, XLenVT, type, vti.Log2SEW, + payload5, kind>; + def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix, + "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX, + vti.Vector, XLenVT, type, vti.Log2SEW, + payload5, kind>; +} + +multiclass VPatVC_XV { + def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se", + "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX, + XLenVT, vti.Vector, type, vti.Log2SEW, + payload5, vti.RegClass, kind, op1_kind>; + def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se", + "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX, + vti.Vector, vti.Vector, type, vti.Log2SEW, + vti.RegClass, kind, op1_kind>; + def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix, + "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX, + vti.Vector, vti.Vector, type, vti.Log2SEW, + vti.RegClass, kind, op1_kind>; +} + +multiclass VPatVC_XVV { + def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se", + "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX, + wti.Vector, vti.Vector, type, vti.Log2SEW, + wti.RegClass, vti.RegClass, kind, op1_kind>; + def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se", + "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX, + wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW, + wti.RegClass, vti.RegClass, kind, op1_kind>; + def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix, + "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX, + wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW, + wti.RegClass, vti.RegClass, kind, op1_kind>; +} + +let Predicates = [HasVendorXSfvcp] in { + foreach vti = AllIntegerVectors in { + defm : VPatVC_X<"x", "X", vti, vti.Scalar, vti.ScalarRegClass>; + defm : VPatVC_X<"i", "I", vti, XLenVT, timm5>; + defm : VPatVC_XV<"xv", "XV", vti, vti.Scalar, vti.ScalarRegClass>; + defm : VPatVC_XV<"iv", "IV", vti, XLenVT, timm5>; + defm : VPatVC_XV<"vv", "VV", vti, vti.Vector, vti.RegClass>; + defm : VPatVC_XVV<"xvv", "XVV", vti, vti, vti.Scalar, 
vti.ScalarRegClass>; + defm : VPatVC_XVV<"ivv", "IVV", vti, vti, XLenVT, timm5>; + defm : VPatVC_XVV<"vvv", "VVV", vti, vti, vti.Vector, vti.RegClass>; + if !ge(vti.SEW, 16) then { + defm : VPatVC_XV<"fv", "F" # vti.SEW # "V", vti, + GetFTypeInfo.Scalar, + GetFTypeInfo.ScalarRegClass, payload1>; + defm : VPatVC_XVV<"fvv", "F" # vti.SEW # "VV", vti, vti, + GetFTypeInfo.Scalar, + GetFTypeInfo.ScalarRegClass, payload1>; + } + } + foreach VtiToWti = AllWidenableIntVectors in { + defvar vti = VtiToWti.Vti; + defvar wti = VtiToWti.Wti; + defm : VPatVC_XVV<"xvw", "XVW", wti, vti, vti.Scalar, vti.ScalarRegClass>; + defm : VPatVC_XVV<"ivw", "IVW", wti, vti, XLenVT, timm5>; + defm : VPatVC_XVV<"vvw", "VVW", wti, vti, vti.Vector, vti.RegClass>; + if !ge(vti.SEW, 16) then { + defm : VPatVC_XVV<"fvw", "F" # vti.SEW # "VW", wti, vti, + GetFTypeInfo.Scalar, + GetFTypeInfo.ScalarRegClass, payload1>; + } + } +} diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll @@ -0,0 +1,1565 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \ +; RUN: -verify-machineinstrs | FileCheck %s + +define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen) + +define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) + +define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) + +define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) + +define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) + +define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) + +define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_x_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen) + +define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) + +define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) + +define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) + +define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) + +define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_x_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.x 3, 31, 31, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + 
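Both the .se and the plain value-returning intrinsics are exercised later in this file. The intended difference is the IntrHasSideEffects property attached via the HasSE parameter in IntrinsicsRISCVXsf.td, so a call to the plain form whose result is unused can be dropped, while the .se form is kept. A minimal sketch of that distinction, assuming the usual dead-code treatment of side-effect-free IntrNoMem intrinsics (the %rs1 and %vl values are placeholders):

; the plain form has no side effects, so this unused call may be deleted by DCE
%dead = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
; the .se form carries IntrHasSideEffects and therefore survives even when its result is unused
%kept = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)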
+declare @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 
31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret 
+entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) + +define @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call 
@llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) + +define @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call 
@llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_x_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) + +define void @test_sf_vc_i_se_e8mf8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e8mf4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e8mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e8m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e8m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e8m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e8m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e16mf4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e16mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e16m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e16m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e16m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e16m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e32mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e32mf2: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e32m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e32m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e32m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e32m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e64m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e64m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define void @test_sf_vc_i_se_e64m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, 
iXLen) + +define void @test_sf_vc_i_se_e64m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_i_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.i 3, 31, 31, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8mf8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8mf4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e8m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define 
@test_sf_vc_v_i_se_e16mf4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e16mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e16m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e16m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e16m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e16m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e32mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e32m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define 
@test_sf_vc_v_i_se_e32m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e32m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e32m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e64m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e64m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e64m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_se_e64m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8mf8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8mf4(iXLen 
%vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e8m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e16mf4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e16mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e16m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; 
CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e16m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e16m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e16m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e32mf2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e32m1(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e32m2(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e32m4(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) + +define @test_sf_vc_v_i_e32m8(iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_i_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
<vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
@@ -0,0 +1,3008 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:   -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:
sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e8m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e8m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e8m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e8m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e16m2( 
%vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e64m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, , , iXLen) + +define void @test_sf_vc_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8mf8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8mf4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e8m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e64m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8mf8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8mf4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; 
CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e8m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e16mf4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e16mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e16m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e16m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e16m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e16m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e32mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e32m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e32m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e32m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e32m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e64m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e64m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e64m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , iXLen) + +define @test_sf_vc_v_vv_e64m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vv_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call 
<vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, <vscale x 64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL:
test_sf_vc_xv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) + +define void @test_sf_vc_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) + +define void @test_sf_vc_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) + +define void @test_sf_vc_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) + +define void @test_sf_vc_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) + +define void @test_sf_vc_xv_se_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) + +define void @test_sf_vc_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) + +define void @test_sf_vc_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) + +define void @test_sf_vc_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) + +define void @test_sf_vc_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) + +define void @test_sf_vc_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_se_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e8mf2( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_se_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_se_e16m8( 
%vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, 
, i8, iXLen) + +define @test_sf_vc_v_xv_e8mf2( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen 3, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) + +define @test_sf_vc_v_xv_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define 
@test_sf_vc_v_xv_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) + +define @test_sf_vc_v_xv_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define @test_sf_vc_v_xv_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, , i32, iXLen) + +define void 
@test_sf_vc_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, <vscale x 64 x i8>, iXLen, iXLen) + +define void @test_sf_vc_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, iXLen 
10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e16mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e16m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e16m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e16m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e16m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e32mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e32m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e32m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, 
ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e32m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e32m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e64m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e64m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e64m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define void @test_sf_vc_iv_se_e64m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_iv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e8mf8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define 
@test_sf_vc_v_iv_se_e8mf4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e8mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e8m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e8m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e8m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e8m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e16mf4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e16mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define 
@test_sf_vc_v_iv_se_e16m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e16m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e16m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e16m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e32mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e32m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e32m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e32m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, , iXLen, 
iXLen) + +define @test_sf_vc_v_iv_se_e32m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e64m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e64m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e64m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_se_e64m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e8mf8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e8mf4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e8mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define 
@test_sf_vc_v_iv_e8m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e8m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e8m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e8m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e16mf4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e16mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e16m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e16m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e16m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e16m4: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e16m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e32mf2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e32m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e32m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e32m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e32m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e64m1( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e64m2( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: 
sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e64m4( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define @test_sf_vc_v_iv_e64m8( %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_iv_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) + +define void @test_sf_vc_fv_se_e16mf4( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, iXLen 31, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen, iXLen, , half, iXLen) + +define void @test_sf_vc_fv_se_e16mf2( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, iXLen 31, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen, iXLen, , half, iXLen) + +define void @test_sf_vc_fv_se_e16m1( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, iXLen 31, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen, iXLen, , half, iXLen) + +define void @test_sf_vc_fv_se_e16m2( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, iXLen 31, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen, iXLen, , half, iXLen) + +define void @test_sf_vc_fv_se_e16m4( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, iXLen 31, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen, iXLen, , half, iXLen) + +define void @test_sf_vc_fv_se_e16m8( %vs2, half %fs1, iXLen %vl) { 
+; CHECK-LABEL: test_sf_vc_fv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, half, iXLen) + +define void @test_sf_vc_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, float, iXLen) + +define void @test_sf_vc_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, float, iXLen) + +define void @test_sf_vc_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, float, iXLen) + +define void @test_sf_vc_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, float, iXLen) + +define void @test_sf_vc_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, float, iXLen) + +define void @test_sf_vc_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen, iXLen, <vscale x 1 x i64>, double, iXLen) + +define void @test_sf_vc_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen, iXLen, , double, iXLen) + +define void @test_sf_vc_fv_se_e64m4( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen, iXLen, , double, iXLen) + +define void @test_sf_vc_fv_se_e64m8( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen, iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_se_e16mf4( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_se_e16mf2( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_se_e16m1( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_se_e16m2( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_se_e16m4( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_se_e16m8( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_se_e32mf2( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_se_e32m1( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_se_e32m2( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_se_e32m4( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_se_e32m8( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_se_e64m1( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_se_e64m2( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define 
@test_sf_vc_v_fv_se_e64m4( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_se_e64m8( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_e16mf4( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_e16mf2( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_e16m1( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_e16m2( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_e16m4( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen, , half, iXLen) + +define @test_sf_vc_v_fv_e16m8( %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen, , 
half, iXLen) + +define @test_sf_vc_v_fv_e32mf2( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_e32m1( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_e32m2( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_e32m4( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_e32m8( %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen, , float, iXLen) + +define @test_sf_vc_v_fv_e64m1( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_e64m2( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_e64m4( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare 
@llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen, , double, iXLen) + +define @test_sf_vc_v_fv_e64m8( %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fv_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, , double, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll @@ -0,0 +1,3020 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \ +; RUN: -verify-machineinstrs | FileCheck %s + +define void @test_sf_vc_vvv_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; 
CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e8m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void 
@llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , , 
iXLen) + +define @test_sf_vc_v_vvv_se_e8m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 
3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , , iXLen) + +define 
@test_sf_vc_v_vvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e8m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, %vd, %vs2, %vs1, 
iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e16m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e16m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e16m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e16m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e32m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; 
CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e32m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e32m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e32m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e64m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e64m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e64m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvv_e64m8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvv_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_xvv_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; 
CHECK-LABEL: test_sf_vc_xvv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvv_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 
%rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvv_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvv_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvv_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvv_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvv_se_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvv_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvv_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvv_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; 
CHECK-LABEL: test_sf_vc_xvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvv_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvv_se_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, 
%vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_se_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_v_xvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_se_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen 3, %vd, 
%vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvv_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 
+; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvv_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvv_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; 
CHECK-LABEL: test_sf_vc_v_xvv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_ivv_se_e8mf8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e8mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e8mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e8m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e8m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e8m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e8m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void 
@llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e16m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, 
v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e64m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e64m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e64m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8mf8( %vd, %vs2, 
iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e8m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare 
@llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e16m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; 
CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e64m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e64m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e64m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8mf8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_v_ivv_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e8m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e8m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e16m1( 
%vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e16m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare 
@llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e64m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e64m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e64m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivv_e64m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivv_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_fvv_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvv_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvv_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvv_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvv_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvv_se_e16m8( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvv_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) + +define void @test_sf_vc_fvv_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) + +define void @test_sf_vc_fvv_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) + +define void @test_sf_vc_fvv_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) + +define void 
@test_sf_vc_fvv_se_e32m8( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen, , , float, iXLen) + +define void @test_sf_vc_fvv_se_e64m1( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen, , , double, iXLen) + +define void @test_sf_vc_fvv_se_e64m2( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen, , , double, iXLen) + +define void @test_sf_vc_fvv_se_e64m4( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen, , , double, iXLen) + +define void @test_sf_vc_fvv_se_e64m8( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, 
fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
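Throughout these tests, iXLen is not a real IR type: the RUN lines at the top of each file (visible further down for xsfvcp-xvw.ll) pipe the source through sed, so the same input is checked once with iXLen rewritten to i32 for riscv32 and once with i64 for riscv64, which also rewrites the iXLen pieces of the mangled intrinsic names. As a minimal sketch, here is what the e32m4 call above would look like on the riscv64 run, with the scalable-vector type spelled out from its nxv8i32 mangling; this is an illustration for the reader, not a line from the patch:

  ; The leading constant 1 is the same immediate that appears as the first
  ; operand of sf.vc.v.fvv in the CHECK lines; %vd lowers to the v8 register
  ; group, %vs2 to the v12 group at LMUL=4, the scalar goes in fa0, and %vl
  ; is the value fed to vsetvli via a0.
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, i64 %vl)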
+define @test_sf_vc_v_fvv_se_e32m8( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvv_se_e64m1( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_se_e64m2( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_se_e64m4( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_se_e64m8( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret 
+entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_e16m8( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvv_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvv_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvv_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvv_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvv_e32m8( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e32m8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvv_e64m1( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_e64m2( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_e64m4( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) + +define @test_sf_vc_v_fvv_e64m8( %vd, %vs2, double %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll @@ -0,0 +1,2111 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \ +; RUN: -verify-machineinstrs | FileCheck %s + +define void @test_sf_vc_vvw_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen 
3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e16m2( 
%vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_vvw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: 
sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare 
@llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e8m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e8m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e8m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e16m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare 
@llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e16m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e16m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e32m1( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e32m2( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) + +define @test_sf_vc_v_vvw_e32m4( %vd, %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_vvw_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) + +define void @test_sf_vc_xvw_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvw_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvw_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvw_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvw_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvw_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) + +define void @test_sf_vc_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) + +define void @test_sf_vc_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_xvw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + 
ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_se_e16m1( %vd, %vs2, 
i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf8, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, %vd, %vs2, i8 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) + +define @test_sf_vc_v_xvw_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare 
@llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) + +define @test_sf_vc_v_xvw_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) + +define @test_sf_vc_v_xvw_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_xvw_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) + +define void @test_sf_vc_ivw_se_e8mf8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e8mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e8mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e8m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e8m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e8m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_ivw_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_ivw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret void +} + +declare void 
@llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e8mf8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e8mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e8mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e8m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e8m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e8m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call 
@llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e8mf8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e8mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e8mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e8m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e8m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e8m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e8m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e8m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e8m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_v_ivw_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define @test_sf_vc_v_ivw_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_ivw_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, 
iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) + +define void @test_sf_vc_fvw_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvw_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvw_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvw_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvw_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) + +define void @test_sf_vc_fvw_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) + +define void @test_sf_vc_fvw_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen, , , float, 
iXLen) + +define void @test_sf_vc_fvw_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) + +define void @test_sf_vc_fvw_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvw_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvw_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvw_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvw_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) + +define @test_sf_vc_v_fvw_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + ret %0 +} + +declare @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) + +define @test_sf_vc_v_fvw_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvw_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + %0 = tail call 
@llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)