diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -647,6 +647,8 @@ SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const; SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const; + SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG, + unsigned RISCVISDOpc) const; SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const; SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -490,13 +490,14 @@ ISD::VP_SHL, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN, - ISD::VP_MERGE, ISD::VP_SELECT}; + ISD::VP_MERGE, ISD::VP_SELECT, ISD::VP_FPTOSI}; static const unsigned FloatingPointVPOps[] = { ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL, ISD::VP_FDIV, ISD::VP_FNEG, ISD::VP_FMA, ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN, - ISD::VP_REDUCE_FMAX, ISD::VP_MERGE, ISD::VP_SELECT}; + ISD::VP_REDUCE_FMAX, ISD::VP_MERGE, ISD::VP_SELECT, + ISD::VP_SITOFP}; if (!Subtarget.is64Bit()) { // We must custom-lower certain vXi64 operations on RV32 due to the vector @@ -846,6 +847,8 @@ setOperationAction(ISD::AND, VT, Custom); setOperationAction(ISD::OR, VT, Custom); setOperationAction(ISD::XOR, VT, Custom); + + setOperationAction(ISD::VP_FPTOSI, VT, Custom); continue; } @@ -3692,6 +3695,10 @@ return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL); case ISD::VP_FMA: return lowerVPOp(Op, DAG, RISCVISD::FMA_VL); + case ISD::VP_FPTOSI: + return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL); + case ISD::VP_SITOFP: + return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL); } } @@ -6126,6 +6133,124 @@ return convertFromScalableVector(VT, VPOp, DAG, Subtarget); } +// Lower Floating-Point/Integer Type-Convert VP SDNodes +SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG, + unsigned RISCVISDOpc) const { + SDLoc DL(Op); + + SDValue Src = Op.getOperand(0); + SDValue Mask = Op.getOperand(1); + SDValue VL = Op.getOperand(2); + + MVT DstVT = Op.getSimpleValueType(); + MVT SrcVT = Src.getSimpleValueType(); + if (DstVT.isFixedLengthVector()) { + DstVT = getContainerForFixedLengthVector(DstVT); + SrcVT = getContainerForFixedLengthVector(SrcVT); + Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget); + MVT MaskVT = MVT::getVectorVT(MVT::i1, DstVT.getVectorElementCount()); + Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); + } + + unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL || + RISCVISDOpc == RISCVISD::FP_TO_SINT_VL) + ? RISCVISD::VSEXT_VL + : RISCVISD::VZEXT_VL; + + unsigned DstEltSize = DstVT.getScalarSizeInBits(); + unsigned SrcEltSize = SrcVT.getScalarSizeInBits(); + + SDValue Result; + if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion. + if (SrcVT.isInteger()) { + assert(DstVT.isFloatingPoint() && "Wrong input/output vector types"); + + // Do we need to do any pre-widening before converting? 
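+      // There is no vector extend that takes an i1 source, so expand the
+      // mask to splats of 0 and 1 (zero-extend) or 0 and -1 (sign-extend)
+      // with a vselect before the conversion below.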
+      if (SrcEltSize == 1) {
+        MVT IntVT = DstVT.changeVectorElementTypeToInteger();
+        MVT XLenVT = Subtarget.getXLenVT();
+        SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+        SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
+                                        DAG.getUNDEF(IntVT), Zero, VL);
+        SDValue One = DAG.getConstant(
+            RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
+        SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
+                                       DAG.getUNDEF(IntVT), One, VL);
+        Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
+                          ZeroSplat, VL);
+      } else if (DstEltSize > (2 * SrcEltSize)) {
+        // Widen before converting.
+        MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
+                                     DstVT.getVectorElementCount());
+        Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, {Src, Mask, VL});
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, DstVT, {Src, Mask, VL});
+    } else {
+      assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
+             "Wrong input/output vector types");
+
+      // Convert f16 to f32, then convert f32 to i64.
+      if (DstEltSize > (2 * SrcEltSize)) {
+        assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
+        MVT InterimFVT =
+            MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
+        Src = DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT,
+                          {Src, Mask, VL});
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, DstVT, {Src, Mask, VL});
+    }
+  } else { // Narrowing + Conversion
+    if (SrcVT.isInteger()) {
+      assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
+      // First do a narrowing convert to an FP type half the size, then round
+      // the FP type to a smaller FP type if needed.
+
+      MVT InterimFVT = DstVT;
+      if (SrcEltSize > (2 * DstEltSize)) {
+        assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
+        assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
+        InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, {Src, Mask, VL});
+
+      if (InterimFVT != DstVT) {
+        Src = Result;
+        Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, {Src, Mask, VL});
+      }
+    } else {
+      assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
+             "Wrong input/output vector types");
+      // First do a narrowing conversion to an integer half the size, then
+      // truncate if needed.
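+      // Note: a narrowing vfncvt only halves the element width per step, so
+      // a wider gap (e.g. f64 -> i8) takes the convert below plus the loop
+      // of halving truncates that follows (each lowers to a vnsrl by zero).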
+
+      // TODO: Handle mask vectors
+      assert(DstVT.getVectorElementType() != MVT::i1 &&
+             "Don't know how to handle masks yet!");
+      MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
+                                        DstVT.getVectorElementCount());
+
+      Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, {Src, Mask, VL});
+
+      while (InterimIVT != DstVT) {
+        SrcEltSize /= 2;
+        Src = Result;
+        InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
+                                      DstVT.getVectorElementCount());
+        Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
+                             {Src, Mask, VL});
+      }
+    }
+  }
+
+  MVT VT = Op.getSimpleValueType();
+  if (!VT.isFixedLengthVector())
+    return Result;
+  return convertFromScalableVector(VT, Result, DAG, Subtarget);
+}
+
 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -553,6 +553,12 @@
                                true_mask, VLOpFrag)),
               (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                   fti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+    def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
+                               (fti.Mask V0), VLOpFrag)),
+              (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK")
+                  (vti.Vector (IMPLICIT_DEF)),
+                  fti.RegClass:$rs2,
+                  (fti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -564,6 +570,12 @@
                                         VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                   fvti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
+    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
+                  (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                  (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -575,6 +587,12 @@
                                         VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   ivti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
+                                (ivti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
+                  (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
+                  (ivti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -587,6 +605,12 @@
                                         VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
+                  (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                  (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -599,6 +623,12 @@
                                         VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                   ivti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
+    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
+                                (ivti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
+                  (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
+                  (ivti.Mask V0), GPR:$vl, ivti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -611,6 +641,12 @@
                                         VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                   fwti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
+    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
+                               (fwti.Mask V0),
+                               VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
+                  (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+                  (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -623,6 +659,12 @@
                                         VLOpFrag)),
               (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   iwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
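+    // Masked variant: select the _MASK pseudo with an IMPLICIT_DEF merge
+    // operand and a tail-agnostic policy, as in the masked patterns above.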
+    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
+                                (iwti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
+                  (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
+                  (iwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
@@ -808,6 +850,12 @@
                                              VLOpFrag)),
             (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX)
                 wti.RegClass:$rs1, X0, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
+                                               (vti.Mask V0),
+                                               VLOpFrag)),
+            (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX#"_MASK")
+                (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, X0,
+                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   def : Pat<(vti.Vector
              (riscv_trunc_vector_vl
@@ -1504,6 +1552,12 @@
                                              VLOpFrag)),
               (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
+                                              (fvti.Mask V0),
+                                              VLOpFrag)),
+              (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
+                  (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                  (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
 }

 // 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions
@@ -1519,6 +1573,12 @@
                                             VLOpFrag)),
               (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                   fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
+                                             (fwti.Mask V0),
+                                             VLOpFrag)),
+              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
+                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+                  (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
     def : Pat<(fvti.Vector (riscv_fncvt_rod_vl
                                 (fwti.Vector fwti.RegClass:$rs1),
                                 (fwti.Mask true_mask),
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -0,0 +1,300 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s
+
+declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half>, <4 x i1>, i32)
+
+define <4 x i8> @vfptosi_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half>, <4 x i1>, i32)
+
+define <4 x i16> @vfptosi_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va,
<4 x i1> %m, i32 %evl) + ret <4 x i16> %v +} + +define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i16_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i16> %v +} + +declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half>, <4 x i1>, i32) + +define <4 x i32> @vfptosi_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i32_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x i32> %v +} + +define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i32_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i32> %v +} + +declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half>, <4 x i1>, i32) + +define <4 x i64> @vfptosi_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i64_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x i64> %v +} + +define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i64_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.f.v v10, v8 +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i64> %v +} + +declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float>, <4 x i1>, i32) + +define <4 x i8> @vfptosi_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i8_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x i8> %v +} + +define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i8_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v9 +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x 
float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i8> %v +} + +declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float>, <4 x i1>, i32) + +define <4 x i16> @vfptosi_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i16_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x i16> %v +} + +define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i16_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i16> %v +} + +declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float>, <4 x i1>, i32) + +define <4 x i32> @vfptosi_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i32_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x i32> %v +} + +define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i32_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i32> %v +} + +declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float>, <4 x i1>, i32) + +define <4 x i64> @vfptosi_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i64_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x i64> %v +} + +define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i64_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i64> %v +} + +declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double>, <4 x i1>, i32) + +define <4 x i8> @vfptosi_v4i8_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i8_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; 
CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x i8> %v +} + +define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i8_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v8 +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i8> %v +} + +declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double>, <4 x i1>, i32) + +define <4 x i16> @vfptosi_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i16_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x i16> %v +} + +define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i16_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vncvt.x.x.w v8, v10 +; CHECK-NEXT: ret + %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i16> %v +} + +declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double>, <4 x i1>, i32) + +define <4 x i32> @vfptosi_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i32_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x i32> %v +} + +define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i32_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i32> %v +} + +declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double>, <4 x i1>, i32) + +define <4 x i64> @vfptosi_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i64_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x i64> %v +} + +define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { +; CHECK-LABEL: vfptosi_v4i64_v4f64_unmasked: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x i64> %v +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll @@ -0,0 +1,86 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 < %s | FileCheck %s + +declare <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1>, <4 x i1>, i32) + +define <4 x half> @vsitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmerge.vim v9, v9, -1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vsitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i1_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x half> %v +} + +declare <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1>, <4 x i1>, i32) + +define <4 x float> @vsitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmerge.vim v9, v9, -1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vsitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i1_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x float> %v +} + +declare <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1>, <4 x i1>, i32) + +define <4 x double> @vsitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f64_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmerge.vim v10, v10, -1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> 
@llvm.vp.sitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vsitofp_v4f64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f64_v4i1_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll @@ -0,0 +1,289 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 < %s | FileCheck %s + +declare <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8>, <4 x i1>, i32) + +define <4 x half> @vsitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vsitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x half> %v +} + +declare <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16>, <4 x i1>, i32) + +define <4 x half> @vsitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vsitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x half> %v +} + +declare <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32>, <4 x i1>, i32) + +define <4 x half> @vsitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl) + ret <4 
x half> %v +} + +define <4 x half> @vsitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x half> %v +} + +declare <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64>, <4 x i1>, i32) + +define <4 x half> @vsitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vsitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f16_v4i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x half> %v +} + +declare <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8>, <4 x i1>, i32) + +define <4 x float> @vsitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsext.vf2 v9, v8, v0.t +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vsitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsext.vf2 v9, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x float> %v +} + +declare <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16>, <4 x i1>, i32) + +define <4 x float> @vsitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vsitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> 
zeroinitializer), i32 %evl) + ret <4 x float> %v +} + +declare <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32>, <4 x i1>, i32) + +define <4 x float> @vsitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vsitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x float> %v +} + +declare <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64>, <4 x i1>, i32) + +define <4 x float> @vsitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vsitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f32_v4i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x float> %v +} + +declare <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8>, <4 x i1>, i32) + +define <4 x double> @vsitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f64_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsext.vf4 v10, v8, v0.t +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vsitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f64_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsext.vf4 v10, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) + ret <4 x double> %v +} + +declare <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16>, <4 x i1>, i32) + +define <4 x double> @vsitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_v4f64_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsext.vf2 v10, v8, v0.t +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vsitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; 
CHECK-LABEL: vsitofp_v4f64_v4i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -0,0 +1,297 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+
+declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+
+declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i1_unmasked(<vscale x 2 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i1_unmasked(<vscale x 2 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vmerge.vim v10, v10, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i1_unmasked(<vscale x 2 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -0,0 +1,287 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+
+declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vsext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i16( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f32_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f32.nxv2i32(, , i32) + +define @vsitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i32( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f32_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f32.nxv2i64(, , i32) + +define @vsitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i64( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f32_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f64.nxv2i8(, , i32) + +define @vsitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsext.vf4 v10, v8, v0.t +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i8( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f64_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsext.vf4 v10, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f64.nxv2i16(, , i32) + +define @vsitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsext.vf2 v10, v8, v0.t +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i16( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f64_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: 
vsext.vf2 v10, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f64.nxv2i32(, , i32) + +define @vsitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8, v0.t +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i32( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f64_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f64.nxv2i64(, , i32) + +define @vsitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i64( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f64_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +}
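
Note (illustrative, not part of the patch): the i1-source tests above show the lowering strategy directly in the generated code: because there is no vector conversion instruction from mask registers, the mask is first materialized as a 0/-1 integer splat (vmv.v.i + vmerge.vim) and only then converted with vfcvt.f.x.v. A minimal IR sketch of that same expansion is below; the function name @sitofp_nxv2i1_expanded is hypothetical, and the intrinsic declaration matches the ones used in the tests.

; Select -1 where the i1 element is set and 0 elsewhere (a signed i1 is 0 or
; -1), then convert the resulting i32 vector. This mirrors the
; vmv.v.i/vmerge.vim/vfcvt.f.x.v sequence seen in the CHECK lines above.
declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @sitofp_nxv2i1_expanded(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
  %sel = select <vscale x 2 x i1> %va, <vscale x 2 x i32> shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> undef, i32 -1, i32 0), <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %sel, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}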