Index: llvm/lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -684,6 +684,8 @@
   SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                              unsigned RISCVISDOpc) const;
+  SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned ExtendOpc) const;
   SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -596,8 +596,9 @@
         VT, Custom);
 
     setOperationAction(
-        {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-        Custom);
+        {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
+        VT, Custom);
 
     setOperationAction(
         {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
@@ -691,8 +692,9 @@
         VT, Custom);
 
     setOperationAction(
-        {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-        Custom);
+        {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
+        VT, Custom);
 
     setOperationAction(ISD::SELECT, VT, Custom);
     setOperationAction(ISD::SELECT_CC, VT, Expand);
@@ -817,9 +819,11 @@
       setOperationAction(
           {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
 
-      setOperationAction(
-          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-          Custom);
+      setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
+                          ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+                          ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
+                          ISD::VP_SCATTER},
+                         VT, Custom);
 
       setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
                           ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
@@ -890,9 +894,11 @@
                           ISD::MGATHER, ISD::MSCATTER},
                          VT, Custom);
 
-      setOperationAction(
-          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-          Custom);
+      setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
+                          ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+                          ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
+                          ISD::VP_SCATTER},
+                         VT, Custom);
 
       setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                           ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
@@ -3623,6 +3629,10 @@
     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
       return lowerVPSetCCMaskOp(Op, DAG);
     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
+  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
+    return lowerVPStridedLoad(Op, DAG);
+  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
+    return lowerVPStridedStore(Op, DAG);
   }
 }
 
@@ -6435,6 +6445,89 @@
   return convertFromScalableVector(VT, Val, DAG, Subtarget);
 }
 
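+// For example (taken from the fixed-vectors-strided-vpload.ll test added
+// below, which runs with -riscv-v-vector-bits-min=128), a masked VP strided
+// load such as
+//   %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(
+//               i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+// ends up selected as a strided vector load with the EVL in vl and the mask
+// in v0:
+//   vsetvli zero, a2, e8, mf8, ta, mu
+//   vlse8.v v8, (a0), a1, v0.t
+// This is only an illustrative sketch of the intent; the exact vsetvli
+// operands depend on the element type and the container LMUL.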
+SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector())
+    ContainerVT = getContainerForFixedLengthVector(VT);
+
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+
+  auto *VPNode = cast<VPStridedLoadSDNode>(Op);
+  // Check if the mask is known to be all ones
+  SDValue Mask = VPNode->getMask();
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vlse
+                                                   : Intrinsic::riscv_vlse_mask,
+                                        DL, XLenVT);
+  SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID,
+                              DAG.getUNDEF(ContainerVT), VPNode->getBasePtr(),
+                              VPNode->getStride()};
+  if (!IsUnmasked) {
+    if (VT.isFixedLengthVector()) {
+      MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+    Ops.push_back(Mask);
+  }
+  Ops.push_back(VPNode->getVectorLength());
+  if (!IsUnmasked) {
+    SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+    Ops.push_back(Policy);
+  }
+
+  SDValue Result =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                              VPNode->getMemoryVT(), VPNode->getMemOperand());
+  SDValue Chain = Result.getValue(1);
+
+  if (VT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
+SDValue RISCVTargetLowering::lowerVPStridedStore(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  auto *VPNode = cast<VPStridedStoreSDNode>(Op);
+  SDValue StoreVal = VPNode->getValue();
+  MVT VT = StoreVal.getSimpleValueType();
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    StoreVal = convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
+  }
+
+  // Check if the mask is known to be all ones
+  SDValue Mask = VPNode->getMask();
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vsse
+                                                   : Intrinsic::riscv_vsse_mask,
+                                        DL, XLenVT);
+  SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID, StoreVal,
+                              VPNode->getBasePtr(), VPNode->getStride()};
+  if (!IsUnmasked) {
+    if (VT.isFixedLengthVector()) {
+      MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+    Ops.push_back(Mask);
+  }
+  Ops.push_back(VPNode->getVectorLength());
+
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VPNode->getVTList(),
+                                 Ops, VPNode->getMemoryVT(),
+                                 VPNode->getMemOperand());
+}
+
 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
 // matched to a RVV indexed load.
The RVV indexed load instructions only // support the "unsigned unscaled" addressing mode; indices are implicitly Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -0,0 +1,565 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK-RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK-RV64 + +declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8*, i8, <2 x i1>, i32) + +define <2 x i8> @strided_vpload_v2i8_i8(i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i8_i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i8_i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i8> %load +} + +declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i16(i8*, i16, <2 x i1>, i32) + +define <2 x i8> @strided_vpload_v2i8_i16(i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i8_i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i8_i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i16(i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i8> %load +} + +declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i64(i8*, i64, <2 x i1>, i32) + +define <2 x i8> @strided_vpload_v2i8_i64(i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i8_i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i8_i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i64(i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i8> %load +} + +declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(i8*, i32, <2 x i1>, i32) + +define <2 x i8> @strided_vpload_v2i8(i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, 
v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i8> %load +} + +declare <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8*, i32, <4 x i1>, i32) + +define <4 x i8> @strided_vpload_v4i8(i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x i8> %load +} + +define <4 x i8> @strided_vpload_v4i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4i8_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4i8_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <4 x i1> poison, i1 true, i32 0 + %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer + %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8* %ptr, i32 %stride, <4 x i1> %b, i32 %evl) + ret <4 x i8> %load +} + +declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0i8.i32(i8*, i32, <8 x i1>, i32) + +define <8 x i8> @strided_vpload_v8i8(i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0i8.i32(i8* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x i8> %load +} + +declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0i16.i32(i16*, i32, <2 x i1>, i32) + +define <2 x i16> @strided_vpload_v2i16(i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0i16.i32(i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i16> %load +} + +declare <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0i16.i32(i16*, i32, <4 x i1>, i32) + +define <4 x i16> @strided_vpload_v4i16(i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; 
CHECK-RV64-LABEL: strided_vpload_v4i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0i16.i32(i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x i16> %load +} + +declare <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16*, i32, <8 x i1>, i32) + +define <8 x i16> @strided_vpload_v8i16(i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x i16> %load +} + +define <8 x i16> @strided_vpload_v8i16_allones_mask(i16* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8i16_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8i16_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <8 x i1> poison, i1 true, i32 0 + %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer + %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16* %ptr, i32 %stride, <8 x i1> %b, i32 %evl) + ret <8 x i16> %load +} + +declare <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0i32.i32(i32*, i32, <2 x i1>, i32) + +define <2 x i32> @strided_vpload_v2i32(i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0i32.i32(i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i32> %load +} + +declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0i32.i32(i32*, i32, <4 x i1>, i32) + +define <4 x i32> @strided_vpload_v4i32(i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0i32.i32(i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x i32> %load +} + +declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32*, i32, <8 x i1>, i32) + +define <8 x i32> @strided_vpload_v8i32(i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext 
%evl) { +; CHECK-RV32-LABEL: strided_vpload_v8i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x i32> %load +} + +define <8 x i32> @strided_vpload_v8i32_allones_mask(i32* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8i32_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8i32_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <8 x i1> poison, i1 true, i32 0 + %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer + %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %ptr, i32 %stride, <8 x i1> %b, i32 %evl) + ret <8 x i32> %load +} + +declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0i64.i32(i64*, i32, <2 x i1>, i32) + +define <2 x i64> @strided_vpload_v2i64(i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0i64.i32(i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x i64> %load +} + +declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64*, i32, <4 x i1>, i32) + +define <4 x i64> @strided_vpload_v4i64(i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x i64> %load +} + +define <4 x i64> @strided_vpload_v4i64_allones_mask(i64* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4i64_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4i64_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <4 x i1> poison, i1 true, i32 0 + %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer + %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64* %ptr, i32 
%stride, <4 x i1> %b, i32 %evl) + ret <4 x i64> %load +} + +declare <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0i64.i32(i64*, i32, <8 x i1>, i32) + +define <8 x i64> @strided_vpload_v8i64(i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0i64.i32(i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x i64> %load +} + +declare <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half*, i32, <2 x i1>, i32) + +define <2 x half> @strided_vpload_v2f16(half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x half> %load +} + +define <2 x half> @strided_vpload_v2f16_allones_mask(half* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2f16_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2f16_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <2 x i1> poison, i1 true, i32 0 + %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer + %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half* %ptr, i32 %stride, <2 x i1> %b, i32 %evl) + ret <2 x half> %load +} + +declare <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0f16.i32(half*, i32, <4 x i1>, i32) + +define <4 x half> @strided_vpload_v4f16(half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0f16.i32(half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x half> %load +} + +declare <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0f16.i32(half*, i32, <8 x i1>, i32) + +define <8 x half> @strided_vpload_v8f16(half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8f16: +; 
CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0f16.i32(half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x half> %load +} + +declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0f32.i32(float*, i32, <2 x i1>, i32) + +define <2 x float> @strided_vpload_v2f32(float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0f32.i32(float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x float> %load +} + +declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0f32.i32(float*, i32, <4 x i1>, i32) + +define <4 x float> @strided_vpload_v4f32(float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0f32.i32(float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x float> %load +} + +declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float*, i32, <8 x i1>, i32) + +define <8 x float> @strided_vpload_v8f32(float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x float> %load +} + +define <8 x float> @strided_vpload_v8f32_allones_mask(float* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8f32_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8f32_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <8 x i1> poison, i1 true, i32 0 + %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer + %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float* %ptr, i32 %stride, <8 x i1> %b, i32 %evl) + ret <8 x float> %load +} + +declare <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0f64.i32(double*, i32, <2 x i1>, i32) + +define <2 x double> @strided_vpload_v2f64(double* %ptr, i32 signext %stride, <2 x 
i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v2f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v2f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0f64.i32(double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret <2 x double> %load +} + +declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double*, i32, <4 x i1>, i32) + +define <4 x double> @strided_vpload_v4f64(double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret <4 x double> %load +} + +define <4 x double> @strided_vpload_v4f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v4f64_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v4f64_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement <4 x i1> poison, i1 true, i32 0 + %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer + %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double* %ptr, i32 %stride, <4 x i1> %b, i32 %evl) + ret <4 x double> %load +} + +declare <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0f64.i32(double*, i32, <8 x i1>, i32) + +define <8 x double> @strided_vpload_v8f64(double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_v8f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_v8f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0f64.i32(double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret <8 x double> %load +} Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll @@ -0,0 +1,457 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK-RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \ +; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \ +; RUN: | FileCheck 
%s --check-prefixes=CHECK-RV64 + +declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i8(<2 x i8>, i8*, i8, <2 x i1>, i32) + +define void @strided_vpstore_v2i8_i8(<2 x i8> %val, i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i8_i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i8_i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i8(<2 x i8> %val, i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i16(<2 x i8>, i8*, i16, <2 x i1>, i32) + +define void @strided_vpstore_v2i8_i16(<2 x i8> %val, i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i8_i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i8_i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i16(<2 x i8> %val, i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i64(<2 x i8>, i8*, i64, <2 x i1>, i32) + +define void @strided_vpstore_v2i8_i64(<2 x i8> %val, i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i8_i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i8_i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i64(<2 x i8> %val, i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8>, i8*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2i8(<2 x i8> %val, i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8> %val, i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4i8.p0i8.i32(<4 x i8>, i8*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4i8(<4 x i8> %val, i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, 
mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4i8.p0i8.i32(<4 x i8> %val, i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8i8.p0i8.i32(<8 x i8>, i8*, i32, <8 x i1>, i32) + +define void @strided_vpstore_v8i8(<8 x i8> %val, i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v8i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v8i8.p0i8.i32(<8 x i8> %val, i8* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2i16.p0i16.i32(<2 x i16>, i16*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2i16(<2 x i16> %val, i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i16.p0i16.i32(<2 x i16> %val, i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4i16.p0i16.i32(<4 x i16>, i16*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4i16(<4 x i16> %val, i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4i16.p0i16.i32(<4 x i16> %val, i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8i16.p0i16.i32(<8 x i16>, i16*, i32, <8 x i1>, i32) + +define void @strided_vpstore_v8i16(<8 x i16> %val, i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v8i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v8i16.p0i16.i32(<8 x i16> %val, i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2i32.p0i32.i32(<2 x i32>, i32*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2i32(<2 x i32> %val, i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, 
e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i32.p0i32.i32(<2 x i32> %val, i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4i32.p0i32.i32(<4 x i32>, i32*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4i32(<4 x i32> %val, i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4i32.p0i32.i32(<4 x i32> %val, i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32>, i32*, i32, <8 x i1>, i32) + +define void @strided_vpstore_v8i32(<8 x i32> %val, i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v8i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32> %val, i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2i64.p0i64.i32(<2 x i64>, i64*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2i64(<2 x i64> %val, i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2i64.p0i64.i32(<2 x i64> %val, i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4i64.p0i64.i32(<4 x i64>, i64*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4i64(<4 x i64> %val, i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4i64.p0i64.i32(<4 x i64> %val, i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8i64.p0i64.i32(<8 x i64>, i64*, i32, <8 x i1>, 
i32) + +define void @strided_vpstore_v8i64(<8 x i64> %val, i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v8i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v8i64.p0i64.i32(<8 x i64> %val, i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2f16.p0f16.i32(<2 x half>, half*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2f16(<2 x half> %val, half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2f16.p0f16.i32(<2 x half> %val, half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4f16.p0f16.i32(<4 x half>, half*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4f16(<4 x half> %val, half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4f16.p0f16.i32(<4 x half> %val, half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8f16.p0f16.i32(<8 x half>, half*, i32, <8 x i1>, i32) + +define void @strided_vpstore_v8f16(<8 x half> %val, half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v8f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v8f16.p0f16.i32(<8 x half> %val, half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2f32.p0f32.i32(<2 x float>, float*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2f32(<2 x float> %val, float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; 
CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2f32.p0f32.i32(<2 x float> %val, float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4f32.p0f32.i32(<4 x float>, float*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4f32(<4 x float> %val, float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4f32.p0f32.i32(<4 x float> %val, float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8f32.p0f32.i32(<8 x float>, float*, i32, <8 x i1>, i32) + +define void @strided_vpstore_v8f32(<8 x float> %val, float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v8f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v8f32.p0f32.i32(<8 x float> %val, float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v2f64.p0f64.i32(<2 x double>, double*, i32, <2 x i1>, i32) + +define void @strided_vpstore_v2f64(<2 x double> %val, double* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v2f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v2f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v2f64.p0f64.i32(<2 x double> %val, double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v4f64.p0f64.i32(<4 x double>, double*, i32, <4 x i1>, i32) + +define void @strided_vpstore_v4f64(<4 x double> %val, double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v4f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_v4f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.v4f64.p0f64.i32(<4 x double> %val, double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.v8f64.p0f64.i32(<8 x double>, double*, i32, <8 x i1>, i32) + +define void @strided_vpstore_v8f64(<8 x double> %val, double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_v8f64: +; CHECK-RV32: # %bb.0: 
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8f64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  call void @llvm.experimental.vp.strided.store.v8f64.p0f64.i32(<8 x double> %val, double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @strided_vpstore_v2i8_allones_mask(<2 x i8> %val, i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i8_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i8_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <2 x i1> poison, i1 true, i32 0
+  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
+  call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8> %val, i8* %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
+  ret void
+}
Index: llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -0,0 +1,725 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i8(i8*, i8, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(i8* %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i8:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i8:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i8(i8* %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i16(i8*, i16, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(i8* %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i16:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i16:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i16(i8* %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8*, i64, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64(i8* %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8* %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64_allones_mask(i8* %ptr, i64 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8* %ptr, i64 %stride, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0i8.i32(i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @strided_vpload_nxv2i8(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i8:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i8:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %load
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0i8.i32(i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @strided_vpload_nxv4i8(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i8:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i8:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %load
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @strided_vpload_nxv8i8(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i8:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i8:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
+define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i8_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i8_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0i16.i32(i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @strided_vpload_nxv1i16(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i16:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i16:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %load
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @strided_vpload_nxv2i16(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i16:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i16:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %load
+}
+
+define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(i16* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i16_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i16_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
+  ret <vscale x 2 x i16> %load
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0i16.i32(i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @strided_vpload_nxv4i16(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i16:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i16:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i16> %load
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0i16.i32(i16*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @strided_vpload_nxv8i16(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i16:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i16:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %load
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0i32.i32(i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @strided_vpload_nxv1i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i32:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i32:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i32> %load
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0i32.i32(i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @strided_vpload_nxv2i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i32:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i32:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %load
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @strided_vpload_nxv4i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i32:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i32:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(i32* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i32_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i32_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
+  ret <vscale x 4 x i32> %load
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0i32.i32(i32*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @strided_vpload_nxv8i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i32:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i32:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %load
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @strided_vpload_nxv1i64(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %load
+}
+
+define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(i64* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i64_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i64_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i64> %load
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0i64.i32(i64*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @strided_vpload_nxv2i64(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
+;
CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv2i64.p0i64.i32(i64* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv4i64.p0i64.i32(i64*, i32, , i32) + +define @strided_vpload_nxv4i64(i64* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv4i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv4i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv4i64.p0i64.i32(i64* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv8i64.p0i64.i32(i64*, i32, , i32) + +define @strided_vpload_nxv8i64(i64* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv8i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv8i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv8i64.p0i64.i32(i64* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv1f16.p0f16.i32(half*, i32, , i32) + +define @strided_vpload_nxv1f16(half* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv1f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv1f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv1f16.p0f16.i32(half* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half*, i32, , i32) + +define @strided_vpload_nxv2f16(half* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv2f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv2f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +define @strided_vpload_nxv2f16_allones_mask(half* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv2f16_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv2f16_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement poison, i1 true, i32 0 + %b = shufflevector %a, poison, zeroinitializer + %load = call 
@llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half* %ptr, i32 signext %stride, %b, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv4f16.p0f16.i32(half*, i32, , i32) + +define @strided_vpload_nxv4f16(half* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv4f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv4f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv4f16.p0f16.i32(half* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv8f16.p0f16.i32(half*, i32, , i32) + +define @strided_vpload_nxv8f16(half* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv8f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv8f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv8f16.p0f16.i32(half* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv1f32.p0f32.i32(float*, i32, , i32) + +define @strided_vpload_nxv1f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv1f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv1f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv1f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv2f32.p0f32.i32(float*, i32, , i32) + +define @strided_vpload_nxv2f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv2f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv2f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv2f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv4f32.p0f32.i32(float*, i32, , i32) + +define @strided_vpload_nxv4f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv4f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv4f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv4f32.p0f32.i32(float* 
%ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float*, i32, , i32) + +define @strided_vpload_nxv8f32(float* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv8f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv8f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +define @strided_vpload_nxv8f32_allones_mask(float* %ptr, i32 signext %stride, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv8f32_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv8f32_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement poison, i1 true, i32 0 + %b = shufflevector %a, poison, zeroinitializer + %load = call @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float* %ptr, i32 signext %stride, %b, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv1f64.p0f64.i32(double*, i32, , i32) + +define @strided_vpload_nxv1f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv1f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv1f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv1f64.p0f64.i32(double* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv2f64.p0f64.i32(double*, i32, , i32) + +define @strided_vpload_nxv2f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv2f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv2f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv2f64.p0f64.i32(double* %ptr, i32 signext %stride, %m, i32 %evl) + ret %load +} + +declare @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double*, i32, , i32) + +define @strided_vpload_nxv4f64(double* %ptr, i32 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpload_nxv4f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpload_nxv4f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double* %ptr, i32 
signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x double> %load
+}
+
+define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4f64_allones_mask:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4f64_allones_mask:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1
+; CHECK-RV64-NEXT: ret
+  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
+  ret <vscale x 4 x double> %load
+}
+
+declare <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0f64.i32(double*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x double> @strided_vpload_nxv8f64(double* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8f64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8f64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  %load = call <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %load
+}
Index: llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -0,0 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i8(<vscale x 1 x i8>, i8*, i8, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i8_i8(<vscale x 1 x i8> %val, i8* %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i8:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i8:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i8(<vscale x 1 x i8> %val, i8* %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i16(<vscale x 1 x i8>, i8*, i16, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i8_i16(<vscale x 1 x i8> %val, i8* %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i16:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i16:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT: ret
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i16(<vscale x 1 x i8> %val, i8* %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i64(<vscale x 1 x i8>, i8*,
i64, , i32) + +define void @strided_vpstore_nxv1i8_i64( %val, i8* %ptr, i64 signext %stride, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i64( %val, i8* %ptr, i64 %stride, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32(, i8*, i32, , i32) + +define void @strided_vpstore_nxv1i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2i8.p0i8.i32(, i8*, i32, , i32) + +define void @strided_vpstore_nxv2i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv4i8.p0i8.i32(, i8*, i32, , i32) + +define void @strided_vpstore_nxv4i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8i8.p0i8.i32(, i8*, i32, , i32) + +define void @strided_vpstore_nxv8i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8i8: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8i8: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv8i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1i16.p0i16.i32(, i16*, i32, , i32) + +define void 
@strided_vpstore_nxv1i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2i16.p0i16.i32(, i16*, i32, , i32) + +define void @strided_vpstore_nxv2i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv4i16.p0i16.i32(, i16*, i32, , i32) + +define void @strided_vpstore_nxv4i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8i16.p0i16.i32(, i16*, i32, , i32) + +define void @strided_vpstore_nxv8i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8i16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8i16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv8i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1i32.p0i32.i32(, i32*, i32, , i32) + +define void @strided_vpstore_nxv1i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2i32.p0i32.i32(, i32*, 
i32, , i32) + +define void @strided_vpstore_nxv2i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv4i32.p0i32.i32(, i32*, i32, , i32) + +define void @strided_vpstore_nxv4i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8i32.p0i32.i32(, i32*, i32, , i32) + +define void @strided_vpstore_nxv8i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8i32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8i32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv8i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1i64.p0i64.i32(, i64*, i32, , i32) + +define void @strided_vpstore_nxv1i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2i64.p0i64.i32(, i64*, i32, , i32) + +define void @strided_vpstore_nxv2i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void 
@llvm.experimental.vp.strided.store.nxv4i64.p0i64.i32(, i64*, i32, , i32) + +define void @strided_vpstore_nxv4i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8i64.p0i64.i32(, i64*, i32, , i32) + +define void @strided_vpstore_nxv8i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv8i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1f16.p0f16.i32(, half*, i32, , i32) + +define void @strided_vpstore_nxv1f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2f16.p0f16.i32(, half*, i32, , i32) + +define void @strided_vpstore_nxv2f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv4f16.p0f16.i32(, half*, i32, , i32) + +define void @strided_vpstore_nxv4f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 
%evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8f16.p0f16.i32(, half*, i32, , i32) + +define void @strided_vpstore_nxv8f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8f16: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8f16: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv8f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1f32.p0f32.i32(, float*, i32, , i32) + +define void @strided_vpstore_nxv1f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32(, float*, i32, , i32) + +define void @strided_vpstore_nxv2f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv4f32.p0f32.i32(, float*, i32, , i32) + +define void @strided_vpstore_nxv4f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8f32.p0f32.i32(, float*, i32, , i32) + +define void @strided_vpstore_nxv8f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8f32: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8f32: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void 
@llvm.experimental.vp.strided.store.nxv8f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv1f64.p0f64.i32(, double*, i32, , i32) + +define void @strided_vpstore_nxv1f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv1f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv2f64.p0f64.i32(, double*, i32, , i32) + +define void @strided_vpstore_nxv2f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv2f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv2f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv2f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv4f64.p0f64.i32(, double*, i32, , i32) + +define void @strided_vpstore_nxv4f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv4f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv4f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv4f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +declare void @llvm.experimental.vp.strided.store.nxv8f64.p0f64.i32(, double*, i32, , i32) + +define void @strided_vpstore_nxv8f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv8f64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv8f64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-RV64-NEXT: ret + call void @llvm.experimental.vp.strided.store.nxv8f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) + ret void +} + +define void @strided_vpstore_nxv1i8_allones_mask( %val, i8* %ptr, i32 signext %strided, i32 zeroext %evl) { +; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_allones_mask: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_allones_mask: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1 +; CHECK-RV64-NEXT: ret + %a = insertelement 
<vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32(<vscale x 1 x i8> %val, i8* %ptr, i32 %strided, <vscale x 1 x i1> %b, i32 %evl)
+  ret void
+}
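+
+; A minimal hedged sketch (not autogenerated and not covered by the CHECK
+; prefixes above): the lowering added in this patch also custom-lowers the
+; fixed-length forms of the strided intrinsics by converting to and from RVV
+; container types, so IR like the fragment below would be expected to select
+; vsse32.v as well. The fixed-vector type mangling and the function name here
+; are illustrative assumptions, not part of this test file's checked output.
+
+declare void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32>, i32*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8i32_sketch(<8 x i32> %val, i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+  call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32> %val, i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}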