diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1135,6 +1135,14 @@ def "int_riscv_" #NAME :RISCVConversionNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
   }
+  multiclass RISCVStrictConversion {
+    let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
+      def "int_riscv_strict_" #NAME : RISCVConversionNoMask;
+    }
+    let IntrProperties = [ImmArg>, IntrInaccessibleMemOnly, IntrWillReturn] in {
+      def "int_riscv_strict_" # NAME # "_mask" : RISCVConversionMask;
+    }
+  }
   multiclass RISCVUSSegLoad {
     def "int_riscv_" # NAME : RISCVUSSegLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask;
   }
@@ -1419,28 +1427,28 @@
   defm vmsof : RISCVMaskUnaryMOut;
   defm vmsif : RISCVMaskUnaryMOut;

-  defm vfcvt_xu_f_v : RISCVConversion;
-  defm vfcvt_x_f_v : RISCVConversion;
+  defm vfcvt_xu_f_v : RISCVConversion, RISCVStrictConversion;
+  defm vfcvt_x_f_v : RISCVConversion, RISCVStrictConversion;
   defm vfcvt_rtz_xu_f_v : RISCVConversion;
   defm vfcvt_rtz_x_f_v : RISCVConversion;
-  defm vfcvt_f_xu_v : RISCVConversion;
-  defm vfcvt_f_x_v : RISCVConversion;
+  defm vfcvt_f_xu_v : RISCVConversion, RISCVStrictConversion;
+  defm vfcvt_f_x_v : RISCVConversion, RISCVStrictConversion;

-  defm vfwcvt_f_xu_v : RISCVConversion;
-  defm vfwcvt_f_x_v : RISCVConversion;
-  defm vfwcvt_xu_f_v : RISCVConversion;
-  defm vfwcvt_x_f_v : RISCVConversion;
+  defm vfwcvt_f_xu_v : RISCVConversion, RISCVStrictConversion;
+  defm vfwcvt_f_x_v : RISCVConversion, RISCVStrictConversion;
+  defm vfwcvt_xu_f_v : RISCVConversion, RISCVStrictConversion;
+  defm vfwcvt_x_f_v : RISCVConversion, RISCVStrictConversion;
   defm vfwcvt_rtz_xu_f_v : RISCVConversion;
   defm vfwcvt_rtz_x_f_v : RISCVConversion;
-  defm vfwcvt_f_f_v : RISCVConversion;
+  defm vfwcvt_f_f_v : RISCVConversion, RISCVStrictConversion;

-  defm vfncvt_f_xu_w : RISCVConversion;
-  defm vfncvt_f_x_w : RISCVConversion;
-  defm vfncvt_xu_f_w : RISCVConversion;
-  defm vfncvt_x_f_w : RISCVConversion;
+  defm vfncvt_f_xu_w : RISCVConversion, RISCVStrictConversion;
+  defm vfncvt_f_x_w : RISCVConversion, RISCVStrictConversion;
+  defm vfncvt_xu_f_w : RISCVConversion, RISCVStrictConversion;
+  defm vfncvt_x_f_w : RISCVConversion, RISCVStrictConversion;
   defm vfncvt_rtz_xu_f_w : RISCVConversion;
   defm vfncvt_rtz_x_f_w : RISCVConversion;
-  defm vfncvt_f_f_w : RISCVConversion;
+  defm vfncvt_f_f_w : RISCVConversion, RISCVStrictConversion;
   defm vfncvt_rod_f_f_w : RISCVConversion;

   // Output: (vector)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -308,6 +308,12 @@
   // result being sign extended to 64 bit. These saturate out of range inputs.
   STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
   STRICT_FCVT_WU_RV64,
+  STRICT_VFCVT_X_F,
+  STRICT_VFCVT_XU_F,
+  STRICT_VFCVT_F_X,
+  STRICT_VFCVT_F_XU,
+  STRICT_FP_ROUND_VL,
+  STRICT_FP_EXTEND_VL,

   // Memory opcodes start here.
  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
@@ -639,6 +645,9 @@
   SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

+  SDValue lowerRVVStrictIntrinsics(SDValue Op, SelectionDAG &DAG, unsigned Opc,
+                                   bool HasMask) const;
+
   SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
   SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4656,6 +4656,35 @@
   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
 }

+SDValue RISCVTargetLowering::lowerRVVStrictIntrinsics(SDValue Op,
+                                                      SelectionDAG &DAG,
+                                                      unsigned Opc,
+                                                      bool HasMask) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT VT = Op.getSimpleValueType();
+  SDVTList VTs = DAG.getVTList({VT, MVT::Other});
+  SmallVector Ops;
+  Ops.push_back(Op.getOperand(0)); // Chain
+  unsigned NumOperands = Op.getNumOperands();
+  if (HasMask) {
+    for (size_t i = 2; i < NumOperands; i++)
+      Ops.push_back(Op.getOperand(i));
+  } else {
+    for (size_t i = 2; i < NumOperands - 1; i++)
+      Ops.push_back(Op.getOperand(i));
+    SDValue VL = Op.getOperand(NumOperands - 1);
+    MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
+    SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+    Ops.push_back(TrueMask);
+    Ops.push_back(VL);
+    // Since unmasked intrinsics and pseudos have no policy operand,
+    // we use 0 here for pattern matching.
+    Ops.push_back(DAG.getConstant(0, DL, XLenVT)); // Policy
+  }
+  return DAG.getNode(Opc, DL, VTs, Ops);
+}
+
 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = Op.getConstantOperandVal(0);
@@ -4923,6 +4952,27 @@
     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
     return DAG.getMergeValues({Result, Chain}, DL);
   }
+#define CASE_STRICT(Intrin, Opcode) \
+  case Intrinsic::riscv_strict_##Intrin: \
+    return lowerRVVStrictIntrinsics(Op, DAG, Opcode, /*HasMask*/ false); \
+  case Intrinsic::riscv_strict_##Intrin##_mask: \
+    return lowerRVVStrictIntrinsics(Op, DAG, Opcode, /*HasMask*/ true);
+
+    CASE_STRICT(vfcvt_xu_f_v, RISCVISD::STRICT_VFCVT_XU_F)
+    CASE_STRICT(vfcvt_x_f_v, RISCVISD::STRICT_VFCVT_X_F)
+    CASE_STRICT(vfcvt_f_xu_v, RISCVISD::STRICT_VFCVT_F_XU)
+    CASE_STRICT(vfcvt_f_x_v, RISCVISD::STRICT_VFCVT_F_X)
+    CASE_STRICT(vfwcvt_xu_f_v, RISCVISD::STRICT_VFCVT_XU_F)
+    CASE_STRICT(vfwcvt_x_f_v, RISCVISD::STRICT_VFCVT_X_F)
+    CASE_STRICT(vfwcvt_f_xu_v, RISCVISD::STRICT_VFCVT_F_XU)
+    CASE_STRICT(vfwcvt_f_x_v, RISCVISD::STRICT_VFCVT_F_X)
+    CASE_STRICT(vfwcvt_f_f_v, RISCVISD::STRICT_FP_EXTEND_VL)
+    CASE_STRICT(vfncvt_xu_f_w, RISCVISD::STRICT_VFCVT_XU_F)
+    CASE_STRICT(vfncvt_x_f_w, RISCVISD::STRICT_VFCVT_X_F)
+    CASE_STRICT(vfncvt_f_xu_w, RISCVISD::STRICT_VFCVT_F_XU)
+    CASE_STRICT(vfncvt_f_x_w, RISCVISD::STRICT_VFCVT_F_X)
+    CASE_STRICT(vfncvt_f_f_w, RISCVISD::STRICT_FP_ROUND_VL)
+#undef CASE_STRICT
   }

   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
@@ -10691,6 +10741,12 @@
   NODE_NAME_CASE(VCPOP_VL)
   NODE_NAME_CASE(VLE_VL)
   NODE_NAME_CASE(VSE_VL)
+  NODE_NAME_CASE(STRICT_VFCVT_X_F)
+  NODE_NAME_CASE(STRICT_VFCVT_XU_F)
+  NODE_NAME_CASE(STRICT_VFCVT_F_X)
+  NODE_NAME_CASE(STRICT_VFCVT_F_XU)
+  NODE_NAME_CASE(STRICT_FP_ROUND_VL)
+  NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
   NODE_NAME_CASE(READ_CSR)
   NODE_NAME_CASE(WRITE_CSR)
  NODE_NAME_CASE(SWAP_CSR)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1228,34 +1228,54 @@
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0

 // Single-Width Floating-Point/Integer Type-Convert Instructions
+let Uses = [FRM], mayRaiseFPException = true in {
 defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
 defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
+}
+let mayRaiseFPException = true in {
 defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
 defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
 defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
 defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;
+}

 // Widening Floating-Point/Integer Type-Convert Instructions
 let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
+let Uses = [FRM], mayRaiseFPException = true in {
 defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
 defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
+}
+let mayRaiseFPException = true in {
 defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
 defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
 defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
 defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
 defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
+}
 } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

 // Narrowing Floating-Point/Integer Type-Convert Instructions
 let Constraints = "@earlyclobber $vd" in {
+let Uses = [FRM], mayRaiseFPException = true in {
 defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
 defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
+}
+let mayRaiseFPException = true in {
 defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
 defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
 defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
 defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
 defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
+}
+let mayRaiseFPException = true in {
 defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
+}
 } // Constraints = "@earlyclobber $vd"
 } // Predicates = HasVInstructionsAnyF]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -4670,35 +4670,47 @@
 //===----------------------------------------------------------------------===//
 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFCVT_XU_F : VPseudoVCVTI_V;
-defm PseudoVFCVT_X_F : VPseudoVCVTI_V;
+let mayRaiseFPException = true in {
 defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
 defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
+defm PseudoVFCVT_XU_F : VPseudoVCVTI_V;
+defm PseudoVFCVT_X_F : VPseudoVCVTI_V;
 defm PseudoVFCVT_F_XU : VPseudoVCVTF_V;
 defm PseudoVFCVT_F_X : VPseudoVCVTF_V;
+}

 //===----------------------------------------------------------------------===//
 // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFWCVT_XU_F : VPseudoVWCVTI_V;
-defm PseudoVFWCVT_X_F : VPseudoVWCVTI_V;
+let mayRaiseFPException = true in {
 defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
 defm PseudoVFWCVT_RTZ_X_F : VPseudoVWCVTI_V;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
+defm PseudoVFWCVT_XU_F : VPseudoVWCVTI_V;
+defm PseudoVFWCVT_X_F : VPseudoVWCVTI_V;
 defm PseudoVFWCVT_F_XU : VPseudoVWCVTF_V;
 defm PseudoVFWCVT_F_X : VPseudoVWCVTF_V;
 defm PseudoVFWCVT_F_F : VPseudoVWCVTD_V;
+}

 //===----------------------------------------------------------------------===//
 // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFNCVT_XU_F : VPseudoVNCVTI_W;
-defm PseudoVFNCVT_X_F : VPseudoVNCVTI_W;
+let mayRaiseFPException = true in {
 defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
 defm PseudoVFNCVT_RTZ_X_F : VPseudoVNCVTI_W;
+defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
+defm PseudoVFNCVT_XU_F : VPseudoVNCVTI_W;
+defm PseudoVFNCVT_X_F : VPseudoVNCVTI_W;
 defm PseudoVFNCVT_F_XU : VPseudoVNCVTF_W;
 defm PseudoVFNCVT_F_X : VPseudoVNCVTF_W;
 defm PseudoVFNCVT_F_F : VPseudoVNCVTD_W;
-defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W;
+}
 } // Predicates = [HasVInstructionsAnyF]

 let Predicates = [HasVInstructions] in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -125,11 +125,30 @@
   SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
   SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
 ]>;
+// Input: (passthru, vector, mask, vl, policy)
+def SDT_RISCVFPRoundOp_VL_TA: SDTypeProfile<1, 5, [
+  SDTCisFP<0>, SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisOpSmallerThanOp<0, 2>,
+  SDTCisFP<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>,
+  SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>,
+  SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT>
+]>;
+// Input: (passthru, vector, mask, vl, policy)
+def SDT_RISCVFPExtendOp_VL_TA : SDTypeProfile<1, 5, [
+  SDTCisFP<0>, SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisOpSmallerThanOp<2, 0>,
+  SDTCisFP<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>,
+  SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>,
+  SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT>
+]>;

 def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
 def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
 def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
+def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL",
+ SDT_RISCVFPExtendOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", + SDT_RISCVFPRoundOp_VL_TA, [SDNPHasChain]>; + def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> @@ -144,6 +163,31 @@ def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; +// Input: (passthru, vector, mask, vl, policy) +def SDT_RISCVFP2IOp_VL_TA: SDTypeProfile<1, 5, [ + SDTCisInt<0>, SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, + SDTCisFP<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>, + SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, + SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT> +]>; + +// Input: (passthru, vector, mask, vl, policy) +def SDT_RISCVI2FPOp_VL_TA: SDTypeProfile<1, 5, [ + SDTCisFP<0>, SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, + SDTCisInt<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>, + SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, + SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT> +]>; + +def riscv_strict_fp_to_sint_vl : SDNode<"RISCVISD::STRICT_VFCVT_X_F", + SDT_RISCVFP2IOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_fp_to_uint_vl : SDNode<"RISCVISD::STRICT_VFCVT_XU_F", + SDT_RISCVFP2IOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_VFCVT_F_X", + SDT_RISCVI2FPOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_VFCVT_F_XU", + SDT_RISCVI2FPOp_VL_TA, [SDNPHasChain]>; + def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDTypeProfile<1, 5, [SDTCVecEltisVT<0, i1>, SDTCisVec<1>, @@ -563,6 +607,50 @@ } } +multiclass VPatConvertStrictSDNode_VL { + def : Pat<(result_type (vop (result_type undef), + (op2_type op2_reg_class:$rs2), + (mask_type true_mask), + VLOpFrag, (XLenVT 0))), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew)>; + def : Pat<(result_type (vop (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type true_mask), + VLOpFrag, (XLenVT 0))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_TU") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew)>; + def : Pat<(result_type (vop (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; +} + +multiclass VPatConvertFP2ISDNode_V_VL_STRICT { + foreach fvti = AllFloatVectors in { + defvar ivti = GetIntVTypeInfo.Vti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatConvertI2FPSDNode_V_VL { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; @@ -574,6 +662,15 @@ } } +multiclass VPatConvertI2FPSDNode_V_VL_STRICT { + foreach fvti = AllFloatVectors in { + defvar ivti = GetIntVTypeInfo.Vti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatWConvertFP2ISDNode_V_VL { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; @@ -586,6 +683,18 @@ } } +multiclass VPatWConvertFP2ISDNode_V_VL_STRICT { + foreach fvtiToFWti = AllWidenableFloatVectors in + { + defvar fvti = fvtiToFWti.Vti; + defvar iwti = GetIntVTypeInfo.Vti; + + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatWConvertI2FPSDNode_V_VL { foreach vtiToWti = AllWidenableIntToFloatVectors 
in { defvar ivti = vtiToWti.Vti; @@ -598,6 +707,16 @@ } } +multiclass VPatWConvertI2FPSDNode_V_VL_STRICT { + foreach vtiToWti = AllWidenableIntToFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatNConvertFP2ISDNode_V_VL { foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar vti = vtiToWti.Vti; @@ -610,6 +729,16 @@ } } +multiclass VPatNConvertFP2ISDNode_V_VL_STRICT { + foreach vtiToWti = AllWidenableIntToFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatNConvertI2FPSDNode_V_VL { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; @@ -622,6 +751,16 @@ } } +multiclass VPatNConvertI2FPSDNode_V_VL_STRICT { + foreach fvtiToFWti = AllWidenableFloatVectors in { + defvar fvti = fvtiToFWti.Vti; + defvar iwti = GetIntVTypeInfo.Vti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatReductionVL { foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { defvar vti_m1 = !cast(!if(is_float, "VF", "VI") # vti.SEW # "M1"); @@ -1419,6 +1558,11 @@ defm : VPatConvertI2FPSDNode_V_VL; defm : VPatConvertI2FPSDNode_V_VL; + defm : VPatConvertFP2ISDNode_V_VL_STRICT; + defm : VPatConvertFP2ISDNode_V_VL_STRICT; + defm : VPatConvertI2FPSDNode_V_VL_STRICT; + defm : VPatConvertI2FPSDNode_V_VL_STRICT; + // 14.18. Widening Floating-Point/Integer Type-Convert Instructions defm : VPatWConvertFP2ISDNode_V_VL; defm : VPatWConvertFP2ISDNode_V_VL; @@ -1432,8 +1576,15 @@ VLOpFrag)), (!cast("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX) fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>; + defm : VPatConvertStrictSDNode_VL; } + defm : VPatWConvertFP2ISDNode_V_VL_STRICT; + defm : VPatWConvertFP2ISDNode_V_VL_STRICT; + defm : VPatWConvertI2FPSDNode_V_VL_STRICT; + defm : VPatWConvertI2FPSDNode_V_VL_STRICT; // 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions defm : VPatNConvertFP2ISDNode_V_VL; defm : VPatNConvertFP2ISDNode_V_VL; @@ -1447,6 +1598,9 @@ VLOpFrag)), (!cast("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX) fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>; + defm : VPatConvertStrictSDNode_VL; def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1), (fwti.Mask true_mask), @@ -1454,6 +1608,10 @@ (!cast("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX) fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>; } + defm : VPatNConvertFP2ISDNode_V_VL_STRICT; + defm : VPatNConvertFP2ISDNode_V_VL_STRICT; + defm : VPatNConvertI2FPSDNode_V_VL_STRICT; + defm : VPatNConvertI2FPSDNode_V_VL_STRICT; } } // Predicates = [HasVInstructionsAnyF] diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv2f16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv4f16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv8f16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv16f16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv32f16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv1f32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv2f32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; 
CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv4f32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv8f32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv16f32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: 
vfcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv1f64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv2f64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv4f64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: 
+ %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv8f64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv2f16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv4f16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv8f16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_tu_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv16f16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv32f16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv1f32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv2f32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv4f32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv8f32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv16f32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv1f64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv2f64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv4f64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: 
ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv8f64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv2i16_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv4i16_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv8i16_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv16i16_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv32i16_nxv32f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv1i32_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv1i32_nxv1f32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv2i32_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv4i32_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { +; 
CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv8i32_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv16i32_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv1i64_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv2i64_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv4i64_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv8i64_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv8i64_nxv8f64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv2i16_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( + , + 
, + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv4i16_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv8i16_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( + undef, + 
%0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv16i16_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv32i16_nxv32f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv1i32_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv2i32_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv4i32_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv8i32_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv16i32_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv1i64_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv2i64_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv4i64_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv8i64_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv2f16_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + 
+declare @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv4f16_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv8f16_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv16f16_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv1f32_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv2f32_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv4f32_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv8f32_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv2f16_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv4f16_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; 
CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv8f16_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv16f16_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv1f32_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfncvt_tu_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv2f32_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv4f32_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv8f32_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv2f16_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv4f16_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv8f16_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + , + , + , + iXLen, + iXLen); + +define 
@intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv16f16_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv1f32_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v10, v8 +; CHECK-NEXT: 
vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv2f32_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv4f32_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv8f32_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll @@ -0,0 +1,889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv2i8.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i8.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv2i8_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i8.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + %0, + 
%1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv4i8.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i8.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv4i8_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i8.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv8i8.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i8.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv8i8_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i8.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv16i8.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv16i8.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv16i8_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfncvt.x.f.w.nxv16i8.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv32i8.nxv32f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv32i8.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv32i8_nxv32f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv32i8.nxv32f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv1i16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i16.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv1i16_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i16.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv2i16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i16.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv2i16_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i16.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv4i16.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i16.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv4i16_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i16.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv8i16.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i16.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv8i16_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i16.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + , + , + , + iXLen, + iXLen); + +define 
@intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv16i16.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv16i16.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv16i16_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv16i16.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv1i32.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i32.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv1i32_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i32.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv2i32.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i32.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv2i32_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i32.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv4i32.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i32.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv4i32_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i32.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv8i32.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i32.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_x.f.w_nxv8i32_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i32.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, 
m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll @@ -0,0 +1,889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i8.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i8.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i8.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i8.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv2i8_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i8.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfncvt.xu.f.w.nxv4i8.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i8.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv4i8_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i8.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i8.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i8.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv8i8_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i8.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i8.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i8.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv16i8_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfncvt.xu.f.w.nxv16i8.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv32i8.nxv32f16( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv32i8.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv32i8_nxv32f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv32i8.nxv32f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i16.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv1i16_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i16.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { +; 
CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i16.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv2i16_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i16.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i16.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i16.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv4i16_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i16.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i16.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i16.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv8i16_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i16.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i16.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i16.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv16i16_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i16.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i32.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i32.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv1i32_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i32.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i32.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i32.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv2i32_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i32.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i32.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i32.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv4i32_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i32.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i32.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i32.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_xu.f.w_nxv8i32_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_xu.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i32.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( + , + , + , + iXLen, + iXLen); + +define 
@intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f32.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f32.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f32.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv2f32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f32.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: 
vfwcvt.f.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f32.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f32.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv4f32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f32.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f32.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f32.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv8f32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f32.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv16f32.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv16f32.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfwcvt_tu_f.f.v_nxv16f32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv16f32.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f64.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f64.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv1f64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f64.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f64.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f64.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv2f64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f64.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} 
+ +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f64.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f64.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv4f64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f64.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f64.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f64.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.f.v_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.f.v_nxv8f64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f64.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll @@ -0,0 +1,889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f16.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f16.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f16.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv2f16_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f16.nxv2i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f16.nxv4i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f16.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv4f16_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f16.nxv4i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f16.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv8f16_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f16.nxv8i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f16.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv16f16_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f16.nxv16i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv32f16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv32f16.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define 
@intrinsic_vfwcvt_tu_f.x.v_nxv32f16_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv32f16.nxv32i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv1f32_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv2f32_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv4f32_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv8f32_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv16f32_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, 
e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv1f64_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv2f64_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( + , + , + iXLen); + +define 
@intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv4f64_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv8f64_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll @@ -0,0 +1,889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 
+; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv2f16_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv4f16_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv8f16_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv16f16_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define 
@intrinsic_vfwcvt_tu_f.xu.v_nxv32f16_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv1f32_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv2f32_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + 
%a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv4f32_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv8f32_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv16f32_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv1f64_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv2f64_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv4f64_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv8f64_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv2i32_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv4i32_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, 
%2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv8i32_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv16i32_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv1i64_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv2i64_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv4i64_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; 
CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv8i64_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv2i32_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv4i32_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv8i32_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv16i32_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv1i64_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) 
nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv2i64_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv4i64_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv8i64_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
<vscale x 8 x i64> @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
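
For readers scanning these autogenerated checks, the operand layout of the strict conversion intrinsics is easier to follow with the scalable vector types spelled out. The sketch below does that for the nxv1i64/nxv1f32 variant of vfwcvt.xu.f.v; the <vscale x ...> types are inferred from the intrinsic name mangling rather than copied verbatim from the test files, and @example is a hypothetical caller, so treat it as illustrative only.

; Unmasked form: (passthru, source, vl). The tests above pass undef as the
; passthru (checked against a tail-agnostic vsetvli), or a live value in the
; "_tu" tests (checked against a tail-undisturbed vsetvli).
declare <vscale x 1 x i64> @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
  <vscale x 1 x i64>,
  <vscale x 1 x float>,
  iXLen)

; Masked form: (maskedoff, source, mask, vl, policy). The masked tests above
; pass iXLen 1 as the trailing policy operand.
declare <vscale x 1 x i64> @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
  <vscale x 1 x i64>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @example(<vscale x 1 x float> %src, iXLen %vl) {
  %r = call <vscale x 1 x i64> @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
      <vscale x 1 x i64> undef, <vscale x 1 x float> %src, iXLen %vl)
  ret <vscale x 1 x i64> %r
}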