diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -647,6 +647,8 @@
   SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const;
   SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
                          unsigned VecOpc) const;
+  SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
+                             unsigned RISCVISDOpc) const;
   SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned ExtendOpc) const;
   SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -490,13 +490,14 @@
         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
-        ISD::VP_MERGE,       ISD::VP_SELECT};
+        ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI};
 
     static const unsigned FloatingPointVPOps[] = {
         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
-        ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT};
+        ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT,
+        ISD::VP_SITOFP};
 
     if (!Subtarget.is64Bit()) {
       // We must custom-lower certain vXi64 operations on RV32 due to the vector
@@ -846,6 +847,8 @@
         setOperationAction(ISD::AND, VT, Custom);
         setOperationAction(ISD::OR, VT, Custom);
         setOperationAction(ISD::XOR, VT, Custom);
+
+        setOperationAction(ISD::VP_FPTOSI, VT, Custom);
         continue;
       }
 
@@ -3692,6 +3695,10 @@
     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
   case ISD::VP_FMA:
     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
+  case ISD::VP_FPTOSI:
+    return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
+  case ISD::VP_SITOFP:
+    return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
   }
 }
 
@@ -6126,6 +6133,135 @@
   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
 }
 
+// Lower Floating-Point/Integer Type-Convert VP SDNodes
+SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
+                                                unsigned RISCVISDOpc) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  // Ops indexes: 0->Op1, 1->Mask, 2->EVL
+  SmallVector<SDValue> Ops;
+  for (const auto &OpIdx : enumerate(Op->ops())) {
+    SDValue V = OpIdx.value();
+    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
+    // Pass through operands which aren't fixed-length vectors.
+    if (!V.getValueType().isFixedLengthVector()) {
+      Ops.push_back(V);
+      continue;
+    }
+    // "cast" fixed length vector to a scalable vector.
+    MVT OpVT = V.getSimpleValueType();
+    MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
+    assert(useRVVForFixedLengthVectorVT(OpVT) &&
+           "Only fixed length vectors are supported!");
+    Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
+  }
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector())
+    ContainerVT = getContainerForFixedLengthVector(VT);
+
+  unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
+                             RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
+                                ? RISCVISD::VSEXT_VL
+                                : RISCVISD::VZEXT_VL;
+
+  MVT DstType = ContainerVT;
+  unsigned DstTypeSize = DstType.getScalarSizeInBits();
+  MVT SrcType = Ops[0].getSimpleValueType();
+  unsigned SrcTypeSize = SrcType.getScalarSizeInBits();
+
+  SDValue Result;
+  if (DstTypeSize >= SrcTypeSize) { // Single-width and widening conversion.
+    if (SrcType.isInteger()) {
+      assert(DstType.isFloatingPoint() && "Wrong input/output vector types");
+
+      // Do we need to do any pre-widening before converting?
+      if (SrcTypeSize == 1) {
+        // An i1 source has no direct conversion; select between splats of
+        // 0 and 1 (or -1 for a signed convert) first.
+        MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
+        MVT XLenVT = Subtarget.getXLenVT();
+        SDValue Zero = DAG.getConstant(0, DL, XLenVT);
+        SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
+                                        DAG.getUNDEF(IntVT), Zero, Ops[2]);
+        SDValue One = DAG.getConstant(
+            RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
+        SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
+                                       DAG.getUNDEF(IntVT), One, Ops[2]);
+        Ops[0] = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Ops[0], OneSplat,
+                             ZeroSplat, Ops[2]);
+      } else if (DstTypeSize > (2 * SrcTypeSize)) {
+        // Widen before converting.
+        MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstTypeSize / 2),
+                                     ContainerVT.getVectorElementCount());
+        Ops[0] = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Ops);
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
+    } else {
+      assert(SrcType.isFloatingPoint() && DstType.isInteger() &&
+             "Wrong input/output vector types");
+
+      // Convert f16 to f32 then convert f32 to i64.
+      if (DstTypeSize > (2 * SrcTypeSize)) {
+        assert(SrcType.getVectorElementType() == MVT::f16 &&
+               "Unexpected type!");
+        MVT InterimFVT =
+            MVT::getVectorVT(MVT::f32, ContainerVT.getVectorElementCount());
+        Ops[0] = DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Ops);
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
+    }
+  } else { // Narrowing + Conversion
+    if (SrcType.isInteger()) {
+      assert(DstType.isFloatingPoint() && "Wrong input/output vector types");
+      // First do a narrowing convert to an FP type half the size, then round
+      // the FP type to a smaller FP type if needed.
+
+      MVT InterimFVT = ContainerVT;
+      if (SrcTypeSize > (2 * DstTypeSize)) {
+        assert(SrcTypeSize == (4 * DstTypeSize) && "Unexpected types!");
+        assert(DstType.getVectorElementType() == MVT::f16 &&
+               "Unexpected type!");
+        InterimFVT =
+            MVT::getVectorVT(MVT::f32, ContainerVT.getVectorElementCount());
+      }
+
+      Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Ops);
+
+      if (InterimFVT != ContainerVT) {
+        Ops[0] = Result;
+        Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, ContainerVT, Ops);
+      }
+    } else {
+      assert(SrcType.isFloatingPoint() && DstType.isInteger() &&
+             "Wrong input/output vector types");
+      // First do a narrowing conversion to an integer half the size, then
+      // truncate if needed.
+
+      // TODO: Handle mask vectors
+      assert(DstType.getVectorElementType() != MVT::i1 &&
+             "Don't know how to handle masks yet!");
+      MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcTypeSize / 2),
+                                        ContainerVT.getVectorElementCount());
+
+      Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Ops);
+
+      // Keep halving the integer width until the destination is reached.
+      while (InterimIVT != ContainerVT) {
+        SrcTypeSize /= 2;
+        Ops[0] = Result;
+        InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcTypeSize / 2),
+                                      ContainerVT.getVectorElementCount());
+        Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
+                             Ops);
+      }
+    }
+  }
+
+  if (!VT.isFixedLengthVector())
+    return Result;
+  return convertFromScalableVector(VT, Result, DAG, Subtarget);
+}
+
 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const {
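Note on the staging above: RVV converts directly only between elements of equal width (vfcvt.*) or across a single 2x width step (vfwcvt.*/vfncvt.*), so any conversion spanning more than one step is split. As an illustrative sketch (register assignment taken from the autogenerated tests below), the widest widening case lowers to two masked instructions:

  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
  ; expected lowering:
  ;   vfwcvt.f.f.v     v10, v8, v0.t   ; f16 -> f32 pre-extend
  ;   vfwcvt.rtz.x.f.v v8, v10, v0.t   ; f32 -> i64 widening convert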
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -553,6 +553,12 @@
                                    true_mask, VLOpFrag)),
               (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                   fti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+    def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
+                               (fti.Mask V0), VLOpFrag)),
+              (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK")
+                  (vti.Vector (IMPLICIT_DEF)),
+                  fti.RegClass:$rs2,
+                  (fti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -564,6 +570,12 @@
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                 fvti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
+    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
+                  (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                  (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -575,6 +587,12 @@
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 ivti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
+                                (ivti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
+                  (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
+                  (ivti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -587,6 +605,12 @@
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
+                  (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                  (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -599,6 +623,12 @@
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                 ivti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
+    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
+                                (ivti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
+                  (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
+                  (ivti.Mask V0), GPR:$vl, ivti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -611,6 +641,12 @@
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                 fwti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
+    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
+                               (fwti.Mask V0),
+                               VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
+                  (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+                  (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -623,6 +659,12 @@
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                 iwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
+                                (iwti.Mask V0),
+                                VLOpFrag)),
+              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
+                  (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
+                  (iwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
@@ -808,6 +850,12 @@
                                                VLOpFrag)),
       (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX)
           wti.RegClass:$rs1, X0, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
+                                               (vti.Mask V0),
+                                               VLOpFrag)),
+      (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX#"_MASK")
+          (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, X0,
+          (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   def : Pat<(vti.Vector (riscv_trunc_vector_vl
@@ -1504,6 +1552,12 @@
                                             VLOpFrag)),
             (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
                 fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+  def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
+                                            (fvti.Mask V0),
+                                            VLOpFrag)),
+            (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
+                (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
 }
 
 // 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions
@@ -1519,6 +1573,12 @@
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                 fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
+  def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
+                                           (fwti.Mask V0),
+                                           VLOpFrag)),
+            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
+                (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+                (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
 
   def : Pat<(fvti.Vector (riscv_fncvt_rod_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask true_mask),
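Each added pattern is the arbitrary-mask counterpart of the existing true_mask pattern directly above it: the mask is constrained to V0, the _MASK variant of the pseudo is selected, and, because a VP conversion carries no passthru operand, the merge operand is tied to IMPLICIT_DEF with the TAIL_AGNOSTIC policy. This is what lets the tests below select the v0.t forms; for a masked same-width vp.fptosi the expected output is, illustratively:

  vsetvli zero, a0, e32, m1, ta, mu
  vfcvt.rtz.x.f.v v8, v8, v0.t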
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -0,0 +1,298 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s
+
+declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half>, <4 x i1>, i32)
+
+define <4 x i8> @vfptosi_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half>, <4 x i1>, i32)
+
+define <4 x i16> @vfptosi_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %v
+}
+
+define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i16> %v
+}
+
+declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half>, <4 x i1>, i32)
+
+define <4 x i32> @vfptosi_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i32_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i32_v4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half>, <4 x i1>, i32)
+
+define <4 x i64> @vfptosi_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i64_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
+
+define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i64_v4f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i64> %v
+}
+
+declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float>, <4 x i1>, i32)
+
+define <4 x i8> @vfptosi_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float>, <4 x i1>, i32)
+
+define <4 x i16> @vfptosi_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %v
+}
+
+define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i16> %v
+}
+
+declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float>, <4 x i1>, i32)
+
+define <4 x i32> @vfptosi_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i32_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i32_v4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float>, <4 x i1>, i32)
+
+define <4 x i64> @vfptosi_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i64_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
+
+define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i64_v4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i64> %v
+}
+
+declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double>, <4 x i1>, i32)
+
+define <4 x i8> @vfptosi_v4i8_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i8_v4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double>, <4 x i1>, i32)
+
+define <4 x i16> @vfptosi_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %v
+}
+
+define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i16_v4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i16> %v
+}
+
+declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double>, <4 x i1>, i32)
+
+define <4 x i32> @vfptosi_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i32_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i32_v4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double>, <4 x i1>, i32)
+
+define <4 x i64> @vfptosi_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i64_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
+
+define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i64_v4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x i64> %v
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s
+
+declare <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1>, <4 x i1>, i32)
+
+define <4 x half> @vsitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vsitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1>, <4 x i1>, i32)
+
+define <4 x float> @vsitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vsitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vmerge.vim v10, v10, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
@@ -0,0 +1,287 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s
+
+declare <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8>, <4 x i1>, i32)
+
+define <4 x half> @vsitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vsitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16>, <4 x i1>, i32)
+
+define <4 x half> @vsitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vsitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x half> @vsitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vsitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x half> @vsitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vsitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f16_v4i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8>, <4 x i1>, i32)
+
+define <4 x float> @vsitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vsext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vsitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16>, <4 x i1>, i32)
+
+define <4 x float> @vsitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vsitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x float> @vsitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vsitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x float> @vsitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vsitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f32_v4i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf4 v10, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf4 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf2 v10, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x double> @vsitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vsitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_v4f64_v4i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  ret <4 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -0,0 +1,320 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
+
+declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i16 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i16 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i64 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i16 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i16 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i64 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i16 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i16 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i64 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
-mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s + +declare @llvm.vp.sitofp.nxv2f16.nxv2i1(, , i32) + +define @vsitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmerge.vim v9, v9, -1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i1( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f16_nxv2i1_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i1 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f32.nxv2i1(, , i32) + +define @vsitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmerge.vim v9, v9, -1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i1( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f32_nxv2i1_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i1 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f64.nxv2i1(, , i32) + +define @vsitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmerge.vim v10, v10, -1, v0 +; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i1( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f64_nxv2i1_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i1 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -0,0 +1,310 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+experimental-zvfh < %s | FileCheck %s + +declare @llvm.vp.sitofp.nxv2f16.nxv2i8(, , i32) + +define 
@vsitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i8( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f16_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i8 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f16.nxv2i16(, , i32) + +define @vsitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i16( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f16_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i16 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f16.nxv2i32(, , i32) + +define @vsitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i32( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f16_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + ret %v +} + +declare @llvm.vp.sitofp.nxv2f16.nxv2i64(, , i32) + +define @vsitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i64( %va, %m, i32 %evl) + ret %v +} + +define @vsitofp_nxv2f16_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i64 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i64( 
+declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vsext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8, v0.t
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vsitofp_nxv2f32_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
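+; f64 results from i8/i16 sources widen by more than 2x, so the integer
+; input is first sign-extended (vsext.vf4 / vsext.vf2) to half the result
+; width and the final 2x step is a widening vfwcvt.f.x.v.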
+declare <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf4 v10, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf4 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf2 v10, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
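+; i64 -> f64 keeps the element width, so a single vfcvt.f.x.v suffices.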
+declare <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vsitofp_nxv2f64_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}