diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1164,6 +1164,11 @@
     Action = TLI.getOperationAction(Node->getOpcode(),
                 cast<MaskedScatterSDNode>(Node)->getValue().getValueType());
     break;
+  case ISD::VP_SCATTER:
+    Action = TLI.getOperationAction(
+        Node->getOpcode(),
+        cast<VPScatterSDNode>(Node)->getValue().getValueType());
+    break;
   case ISD::VECREDUCE_FADD:
   case ISD::VECREDUCE_FMUL:
   case ISD::VECREDUCE_ADD:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -568,8 +568,8 @@
   SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                                SelectionDAG &DAG) const;
-  SDValue lowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
-  SDValue lowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -557,6 +557,9 @@
       setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
 
+      setOperationAction(ISD::VP_GATHER, VT, Custom);
+      setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
@@ -621,6 +624,9 @@
       setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
 
+      setOperationAction(ISD::VP_GATHER, VT, Custom);
+      setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
       setOperationAction(ISD::SELECT, VT, Custom);
       setOperationAction(ISD::SELECT_CC, VT, Expand);
 
@@ -730,6 +736,10 @@
         setOperationAction(ISD::MSTORE, VT, Custom);
         setOperationAction(ISD::MGATHER, VT, Custom);
         setOperationAction(ISD::MSCATTER, VT, Custom);
+
+        setOperationAction(ISD::VP_GATHER, VT, Custom);
+        setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
         setOperationAction(ISD::ADD, VT, Custom);
         setOperationAction(ISD::MUL, VT, Custom);
         setOperationAction(ISD::SUB, VT, Custom);
@@ -805,6 +815,10 @@
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);
+
+       setOperationAction(ISD::VP_GATHER, VT, Custom);
+       setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
@@ -873,6 +887,8 @@
     setTargetDAGCombine(ISD::FCOPYSIGN);
     setTargetDAGCombine(ISD::MGATHER);
     setTargetDAGCombine(ISD::MSCATTER);
+    setTargetDAGCombine(ISD::VP_GATHER);
+    setTargetDAGCombine(ISD::VP_SCATTER);
     setTargetDAGCombine(ISD::SRA);
     setTargetDAGCombine(ISD::SRL);
     setTargetDAGCombine(ISD::SHL);
@@ -2714,9 +2730,11 @@
   case ISD::FCOPYSIGN:
     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
   case ISD::MGATHER:
-    return lowerMGATHER(Op, DAG);
+  case ISD::VP_GATHER:
+    return lowerMaskedGather(Op, DAG);
   case ISD::MSCATTER:
-    return lowerMSCATTER(Op, DAG);
+  case ISD::VP_SCATTER:
+    return lowerMaskedScatter(Op, DAG);
   case ISD::FLT_ROUNDS_:
     return lowerGET_ROUNDING(Op, DAG);
   case ISD::SET_ROUNDING:
@@ -4688,36 +4706,62 @@
   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
 }
 
-// Custom lower MGATHER to a legalized form for RVV. It will then be matched to
-// a RVV indexed load. The RVV indexed load instructions only support the
-// "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
-// truncated to XLEN and are treated as byte offsets. Any signed or scaled
-// indexing is extended to the XLEN value type and scaled accordingly.
-SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
-  auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
+// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
+// matched to a RVV indexed load. The RVV indexed load instructions only
+// support the "unsigned unscaled" addressing mode; indices are implicitly
+// zero-extended or truncated to XLEN and are treated as byte offsets. Any
+// signed or scaled indexing is extended to the XLEN value type and scaled
+// accordingly.
+SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
+                                               SelectionDAG &DAG) const {
   SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
 
-  SDValue Index = MGN->getIndex();
-  SDValue Mask = MGN->getMask();
-  SDValue PassThru = MGN->getPassThru();
+  EVT MemVT;
+  MachineMemOperand *MMO;
+  ISD::LoadExtType LoadExtType;
+  SDValue Chain, BasePtr, Index, Mask, PassThru, VL;
+
+  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
+    MemVT = VPGN->getMemoryVT();
+    MMO = VPGN->getMemOperand();
+
+    Chain = VPGN->getChain();
+    BasePtr = VPGN->getBasePtr();
+    Index = VPGN->getIndex();
+    Mask = VPGN->getMask();
+    PassThru = DAG.getUNDEF(VT);
+    VL = VPGN->getVectorLength();
+    // VP doesn't support extending loads.
+    LoadExtType = ISD::NON_EXTLOAD;
+  } else {
+    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
+    // Else it must be a MGATHER.
+    MemVT = MGN->getMemoryVT();
+    MMO = MGN->getMemOperand();
+
+    Chain = MGN->getChain();
+    BasePtr = MGN->getBasePtr();
+    Index = MGN->getIndex();
+    Mask = MGN->getMask();
+    PassThru = MGN->getPassThru();
+    LoadExtType = MGN->getExtensionType();
+  }
 
-  MVT VT = Op.getSimpleValueType();
   MVT IndexVT = Index.getSimpleValueType();
   MVT XLenVT = Subtarget.getXLenVT();
 
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Unexpected VTs!");
-  assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
-         "Unexpected pointer type");
+  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
   // Targets have to explicitly opt-in for extending vector loads.
-  assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
-         "Unexpected extending MGATHER");
+  assert(LoadExtType == ISD::NON_EXTLOAD &&
+         "Unexpected extending MGATHER/VP_GATHER");
 
   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
   // the selection of the masked intrinsics doesn't do this for us.
   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
 
-  SDValue VL;
   MVT ContainerVT = VT;
   if (VT.isFixedLengthVector()) {
     // We need to use the larger of the result and index type to determine the
@@ -4740,18 +4784,17 @@
       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
     }
+  }
 
-    VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
-  } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
-  SmallVector<SDValue, 8> Ops{MGN->getChain(),
-                              DAG.getTargetConstant(IntID, DL, XLenVT)};
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
   if (!IsUnmasked)
     Ops.push_back(PassThru);
-  Ops.push_back(MGN->getBasePtr());
+  Ops.push_back(BasePtr);
   Ops.push_back(Index);
   if (!IsUnmasked)
     Ops.push_back(Mask);
@@ -4759,9 +4802,8 @@
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
   SDValue Result =
-      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
-                              MGN->getMemoryVT(), MGN->getMemOperand());
-  SDValue Chain = Result.getValue(1);
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
+  Chain = Result.getValue(1);
 
   if (VT.isFixedLengthVector())
     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
@@ -4769,18 +4811,43 @@
   return DAG.getMergeValues({Result, Chain}, DL);
 }
 
-// Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
-// a RVV indexed store. The RVV indexed store instructions only support the
-// "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
-// truncated to XLEN and are treated as byte offsets. Any signed or scaled
-// indexing is extended to the XLEN value type and scaled accordingly.
-SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
-                                           SelectionDAG &DAG) const {
-  auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
+// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
+// matched to a RVV indexed store. The RVV indexed store instructions only
+// support the "unsigned unscaled" addressing mode; indices are implicitly
+// zero-extended or truncated to XLEN and are treated as byte offsets. Any
+// signed or scaled indexing is extended to the XLEN value type and scaled
+// accordingly.
+SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
+                                                SelectionDAG &DAG) const {
   SDLoc DL(Op);
 
-  SDValue Index = MSN->getIndex();
-  SDValue Mask = MSN->getMask();
-  SDValue Val = MSN->getValue();
+  EVT MemVT;
+  MachineMemOperand *MMO;
+  bool IsTruncatingStore = false;
+  SDValue Chain, Index, Mask, Val, BasePtr, VL;
+
+  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
+    MMO = VPSN->getMemOperand();
+    MemVT = VPSN->getMemoryVT();
+    Chain = VPSN->getChain();
+    Index = VPSN->getIndex();
+    Mask = VPSN->getMask();
+    Val = VPSN->getValue();
+    BasePtr = VPSN->getBasePtr();
+    VL = VPSN->getVectorLength();
+    // VP doesn't support truncating stores.
+    IsTruncatingStore = false;
+  } else {
+    // Else it must be a MSCATTER.
+    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
+    MMO = MSN->getMemOperand();
+    MemVT = MSN->getMemoryVT();
+    Chain = MSN->getChain();
+    Index = MSN->getIndex();
+    Mask = MSN->getMask();
+    Val = MSN->getValue();
+    BasePtr = MSN->getBasePtr();
+    IsTruncatingStore = MSN->isTruncatingStore();
+  }
 
   MVT VT = Val.getSimpleValueType();
   MVT IndexVT = Index.getSimpleValueType();
@@ -4788,21 +4855,19 @@
 
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Unexpected VTs!");
-  assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
-         "Unexpected pointer type");
+  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
   // Targets have to explicitly opt-in for extending vector loads and
   // truncating vector stores.
-  assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
+  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
 
   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
   // the selection of the masked intrinsics doesn't do this for us.
   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
 
-  SDValue VL;
+  MVT ContainerVT = VT;
   if (VT.isFixedLengthVector()) {
     // We need to use the larger of the value and index type to determine the
     // scalable type to use so we don't increase LMUL for any operand/result.
-    MVT ContainerVT;
     if (VT.bitsGE(IndexVT)) {
       ContainerVT = getContainerForFixedLengthVector(VT);
       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
@@ -4821,24 +4886,23 @@
         MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
   }
+  }
 
-    VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
-  } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
-  SmallVector<SDValue, 8> Ops{MSN->getChain(),
-                              DAG.getTargetConstant(IntID, DL, XLenVT)};
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
   Ops.push_back(Val);
-  Ops.push_back(MSN->getBasePtr());
+  Ops.push_back(BasePtr);
   Ops.push_back(Index);
   if (!IsUnmasked)
     Ops.push_back(Mask);
   Ops.push_back(VL);
 
-  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
-                                 MSN->getMemoryVT(), MSN->getMemOperand());
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
+                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
 }
 
 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
@@ -6336,18 +6400,34 @@
         DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
   }
   case ISD::MGATHER:
-  case ISD::MSCATTER: {
+  case ISD::MSCATTER:
+  case ISD::VP_GATHER:
+  case ISD::VP_SCATTER: {
     if (!DCI.isBeforeLegalize())
       break;
-    MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
-    SDValue Index = MGSN->getIndex();
+    SDValue Index, ScaleOp;
+    bool IsIndexScaled = false;
+    bool IsIndexSigned = false;
+    if (N->getOpcode() == ISD::VP_GATHER || N->getOpcode() == ISD::VP_SCATTER) {
+      const auto *VPGSN = cast<VPGatherScatterSDNode>(N);
+      Index = VPGSN->getIndex();
+      ScaleOp = VPGSN->getScale();
+      IsIndexScaled = VPGSN->isIndexScaled();
+      IsIndexSigned = VPGSN->isIndexSigned();
+    } else {
+      const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
+      Index = MGSN->getIndex();
+      ScaleOp = MGSN->getScale();
+      IsIndexScaled = MGSN->isIndexScaled();
+      IsIndexSigned = MGSN->isIndexSigned();
+    }
     EVT IndexVT = Index.getValueType();
     MVT XLenVT = Subtarget.getXLenVT();
     // RISCV indexed loads only support the "unsigned unscaled" addressing
     // mode, so anything else must be manually legalized.
-    bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
-                                (MGSN->isIndexSigned() &&
-                                 IndexVT.getVectorElementType().bitsLT(XLenVT));
+    bool NeedsIdxLegalization =
+        IsIndexScaled ||
+        (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
     if (!NeedsIdxLegalization)
       break;
 
@@ -6356,36 +6436,48 @@
     // Any index legalization should first promote to XLenVT, so we don't lose
     // bits when scaling. This may create an illegal index type so we let
     // LLVM's legalization take care of the splitting.
+    // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
       IndexVT = IndexVT.changeVectorElementType(XLenVT);
-      Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
-                                                : ISD::ZERO_EXTEND,
+      Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                           DL, IndexVT, Index);
     }
 
-    unsigned Scale = N->getConstantOperandVal(5);
-    if (MGSN->isIndexScaled() && Scale != 1) {
+    unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
+    if (IsIndexScaled && Scale != 1) {
       // Manually scale the indices by the element size.
       // TODO: Sanitize the scale operand here?
+      // TODO: For VP nodes, should we use VP_SHL here?
       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
     }
 
     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
-    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
+    if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
+      return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
+                             {VPGN->getChain(), VPGN->getBasePtr(), Index,
+                              VPGN->getScale(), VPGN->getMask(),
+                              VPGN->getVectorLength()},
+                             VPGN->getMemOperand(), NewIndexTy);
+    if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
+      return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
+                              {VPSN->getChain(), VPSN->getValue(),
+                               VPSN->getBasePtr(), Index, VPSN->getScale(),
+                               VPSN->getMask(), VPSN->getVectorLength()},
+                              VPSN->getMemOperand(), NewIndexTy);
+    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
       return DAG.getMaskedGather(
-          N->getVTList(), MGSN->getMemoryVT(), DL,
-          {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
-           MGSN->getBasePtr(), Index, MGN->getScale()},
+          N->getVTList(), MGN->getMemoryVT(), DL,
+          {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
+           MGN->getBasePtr(), Index, MGN->getScale()},
           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
-    }
 
     const auto *MSN = cast<MaskedScatterSDNode>(N);
     return DAG.getMaskedScatter(
-        N->getVTList(), MGSN->getMemoryVT(), DL,
-        {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
-         Index, MGSN->getScale()},
-        MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
+        N->getVTList(), MSN->getMemoryVT(), DL,
+        {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
+         Index, MSN->getScale()},
+        MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
   }
   case RISCVISD::SRA_VL:
   case RISCVISD::SRL_VL:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -0,0 +1,1680 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
+; RUN:
-verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*>, <2 x i1>, i32) + +define <2 x i8> @vpgather_v2i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x i8> %v +} + +define <2 x i16> @vpgather_v2i8_sextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8_sextload_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8_sextload_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = sext <2 x i8> %v to <2 x i16> + ret <2 x i16> %ev +} + +define <2 x i16> @vpgather_v2i8_zextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8_zextload_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vzext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8_zextload_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vzext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = zext <2 x i8> %v to <2 x i16> + ret <2 x i16> %ev +} + +define <2 x i32> @vpgather_v2i8_sextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8_sextload_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8_sextload_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = sext <2 x i8> %v to <2 x i32> + ret <2 x i32> %ev +} + +define <2 x i32> @vpgather_v2i8_zextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8_zextload_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vzext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8_zextload_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; 
RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vzext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = zext <2 x i8> %v to <2 x i32> + ret <2 x i32> %ev +} + +define <2 x i64> @vpgather_v2i8_sextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8_sextload_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsext.vf8 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8_sextload_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsext.vf8 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = sext <2 x i8> %v to <2 x i64> + ret <2 x i64> %ev +} + +define <2 x i64> @vpgather_v2i8_zextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i8_zextload_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vzext.vf8 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i8_zextload_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vzext.vf8 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = zext <2 x i8> %v to <2 x i64> + ret <2 x i64> %ev +} + +declare <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*>, <4 x i1>, i32) + +define <4 x i8> @vpgather_v4i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x i8> %v +} + +define <4 x i8> @vpgather_truemask_v4i8(<4 x i8*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_v4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8 +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x i8> %v +} + +declare <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*>, <8 x i1>, i32) + +define <8 x i8> @vpgather_v8i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8i8: +; RV64: # %bb.0: +; 
RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i8> %v +} + +define <8 x i8> @vpgather_baseidx_v8i8(i8* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs + %v = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i8> %v +} + +declare <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*>, <2 x i1>, i32) + +define <2 x i16> @vpgather_v2i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x i16> %v +} + +define <2 x i32> @vpgather_v2i16_sextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i16_sextload_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i16_sextload_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = sext <2 x i16> %v to <2 x i32> + ret <2 x i32> %ev +} + +define <2 x i32> @vpgather_v2i16_zextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i16_zextload_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vzext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i16_zextload_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vzext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = zext <2 x i16> %v to <2 x i32> + ret <2 x i32> %ev +} + +define <2 x i64> @vpgather_v2i16_sextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i16_sextload_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, 
mu +; RV32-NEXT: vsext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i16_sextload_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = sext <2 x i16> %v to <2 x i64> + ret <2 x i64> %ev +} + +define <2 x i64> @vpgather_v2i16_zextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i16_zextload_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vzext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i16_zextload_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vzext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = zext <2 x i16> %v to <2 x i64> + ret <2 x i64> %ev +} + +declare <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*>, <4 x i1>, i32) + +define <4 x i16> @vpgather_v4i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x i16> %v +} + +define <4 x i16> @vpgather_truemask_v4i16(<4 x i16*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8 +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x i16> %v +} + +declare <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*>, <8 x i1>, i32) + +define <8 x i16> @vpgather_v8i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i16> %v +} + +define <8 x i16> @vpgather_baseidx_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: 
vsext.vf4 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs + %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i16> %v +} + +define <8 x i16> @vpgather_baseidx_sext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs + %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i16> %v +} + +define <8 x i16> @vpgather_baseidx_zext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs + %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i16> %v +} + +define <8 x i16> @vpgather_baseidx_v8i16(i16* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs + %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i16> %v +} + +declare <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*>, <2 x i1>, i32) + +define <2 x i32> @vpgather_v2i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i32: 
+; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x i32> %v +} + +define <2 x i64> @vpgather_v2i32_sextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i32_sextload_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i32_sextload_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = sext <2 x i32> %v to <2 x i64> + ret <2 x i64> %ev +} + +define <2 x i64> @vpgather_v2i32_zextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i32_zextload_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vzext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i32_zextload_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vzext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) + %ev = zext <2 x i32> %v to <2 x i64> + ret <2 x i64> %ev +} + +declare <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*>, <4 x i1>, i32) + +define <4 x i32> @vpgather_v4i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x i32> %v +} + +define <4 x i32> @vpgather_truemask_v4i32(<4 x i32*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x i32> %v +} + +declare <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*>, <8 x i1>, i32) + +define <8 x i32> @vpgather_v8i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8i32: +; RV32: # %bb.0: +; 
RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8, v0.t +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_sext_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_zext_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i16_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; 
RV64-LABEL: vpgather_baseidx_v8i16_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_sext_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_zext_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +define <8 x i32> @vpgather_baseidx_v8i32(i32* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v8, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs + %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i32> %v +} + +declare <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*>, <2 x i1>, i32) + +define <2 x i64> @vpgather_v2i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2i64: +; RV64: # 
%bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x i64> %v +} + +declare <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*>, <4 x i1>, i32) + +define <4 x i64> @vpgather_v4i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8, v0.t +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x i64> %v +} + +define <4 x i64> @vpgather_truemask_v4i64(<4 x i64*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8 +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x i64> %v +} + +declare <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*>, <8 x i1>, i32) + +define <8 x i64> @vpgather_v8i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v28, (zero), v8, v0.t +; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_v8i8_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf8 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i64> + %ptrs = 
getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_v8i8_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf8 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i16_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i16_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_v8i16_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf4 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_v8i16_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf4 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i32_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v8, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i32_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, 
e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_v8i32_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf2 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_v8i32_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf2 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsll.vi v28, v8, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs + %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x i64> %v +} + +declare <2 x half> @llvm.vp.gather.v2f16.v2p0f16(<2 x half*>, <2 x i1>, i32) + +define <2 x half> @vpgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <2 x half> @llvm.vp.gather.v2f16.v2p0f16(<2 x half*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*>, <4 x i1>, i32) + +define <4 x half> @vpgather_v4f16(<4 x half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vpgather_truemask_v4f16(<4 x half*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: 
vpgather_truemask_v4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8 +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*>, <8 x i1>, i32) + +define <8 x half> @vpgather_v8f16(<8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vpgather_baseidx_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs + %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vpgather_baseidx_sext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs + %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vpgather_baseidx_zext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; 
RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs + %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vpgather_baseidx_v8f16(half* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs + %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <2 x float> @llvm.vp.gather.v2f32.v2p0f32(<2 x float*>, <2 x i1>, i32) + +define <2 x float> @vpgather_v2f32(<2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <2 x float> @llvm.vp.gather.v2f32.v2p0f32(<2 x float*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*>, <4 x i1>, i32) + +define <4 x float> @vpgather_v4f32(<4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vpgather_truemask_v4f32(<4 x float*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_v4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*>, <8 x i1>, i32) + +define <8 x float> @vpgather_v8f32(<8 x float*> %ptrs, <8 x i1> %m, 
i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8, v0.t +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_sext_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_zext_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i16_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i16_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_sext_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_zext_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vpgather_baseidx_v8f32(float* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v8, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs + %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*>, <2 x i1>, i32) + +define <2 x double> @vpgather_v2f64(<2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v2f64: +; RV32: # %bb.0: +; 
RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*> %ptrs, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*>, <4 x i1>, i32) + +define <4 x double> @vpgather_v4f64(<4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8, v0.t +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vpgather_truemask_v4f64(<4 x double*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8 +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*>, <8 x i1>, i32) + +define <8 x double> @vpgather_v8f64(<8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v28, (zero), v8, v0.t +; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i8_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i8_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_v8i8_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf8 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_v8i8_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf8 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i16_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v8 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i16_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_v8i16_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf4 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_v8i16_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf4 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_v8i32_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 
8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v8, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_v8i32_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v8 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_v8i32_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf2 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_v8i32_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf2 v28, v8 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsll.vi v28, v8, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs + %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -0,0 +1,1530 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8>, <2 x i8*>, <2 x i1>, i32) + +define void @vpscatter_v2i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; 
RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v9, v0.t +; RV64-NEXT: ret + %tval = trunc <2 x i16> %val to <2 x i8> + call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v9, v0.t +; RV64-NEXT: ret + %tval = trunc <2 x i32> %val to <2 x i8> + call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v9, v0.t +; RV64-NEXT: ret + %tval = trunc <2 x i64> %val to <2 x i8> + call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, <4 x i1>, i32) + +define void @vpscatter_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: 
vpscatter_v4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, <8 x i1>, i32) + +define void @vpscatter_v8i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8(<8 x i8> %val, i8* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v9 +; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v9 +; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16>, <2 x i16*>, <2 x i1>, i32) + +define void @vpscatter_v2i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: 
vsoxei64.v v25, (zero), v9, v0.t +; RV64-NEXT: ret + %tval = trunc <2 x i32> %val to <2 x i16> + call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v9, v0.t +; RV64-NEXT: ret + %tval = trunc <2 x i64> %val to <2 x i16> + call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, <4 x i1>, i32) + +define void @vpscatter_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, <8 x i1>, i32) + +define void @vpscatter_v8i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; 
RV64-LABEL: vpscatter_baseidx_v8i8_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs + call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs + call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i16(<8 x i16> %val, i16* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs + call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32>, <2 x i32*>, <2 x i1>, i32) + +define void @vpscatter_v2i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, 
v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v9, v0.t +; RV64-NEXT: ret + %tval = trunc <2 x i64> %val to <2 x i32> + call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %tval, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, <4 x i1>, i32) + +define void @vpscatter_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32>, <8 x i32*>, <8 x i1>, i32) + +define void @vpscatter_v8i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i8_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, 
e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i16_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i16_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; 
RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf2 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf4 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i32(<8 x i32> %val, i32* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v10, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs + call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v2i64.v2p0i64(<2 x i64>, <2 x i64*>, <2 x i1>, i32) + +define void @vpscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2i64.v2p0i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64>, <4 x i64*>, <4 x i1>, i32) + +define void @vpscatter_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 
%evl) + ret void +} + +define void @vpscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64>, <8 x i64*>, <8 x i1>, i32) + +define void @vpscatter_v8i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v12 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i8_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v12 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_v8i8_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf8 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_v8i8_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf8 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void 
@vpscatter_baseidx_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i16_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v12 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i16_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v12 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_v8i16_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf4 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_v8i16_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf4 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i32_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v12, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i32_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v12 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_v8i32_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf2 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds 
i64, i64* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_v8i32_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf2 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsll.vi v28, v12, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs + call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half>, <2 x half*>, <2 x i1>, i32) + +define void @vpscatter_v2f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, <4 x i1>, i32) + +define void @vpscatter_v4f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, <8 x i1>, i32) + +define void @vpscatter_v8f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8f16: +; RV32: # 
%bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i8_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs + call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i16> + %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs + call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8f16(<8 x half> %val, half* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v9 +; RV32-NEXT: vadd.vv v26, v26, v26 +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: 
vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v9 +; RV64-NEXT: vadd.vv v28, v28, v28 +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs + call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v2f32.v2p0f32(<2 x float>, <2 x float*>, <2 x i1>, i32) + +define void @vpscatter_v2f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2f32.v2p0f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, <4 x i1>, i32) + +define void @vpscatter_v4f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float>, <8 x float*>, <8 x i1>, i32) + +define void @vpscatter_v8f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; 
RV64-LABEL: vpscatter_baseidx_v8i8_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf4 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf8 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i16_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i16_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: 
vsext.vf2 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vzext.vf2 v26, v10 +; RV32-NEXT: vsll.vi v26, v26, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vzext.vf4 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i32> + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8f32(<8 x float> %val, float* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v10, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v10 +; RV64-NEXT: vsll.vi v28, v28, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs + call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v2f64.v2p0f64(<2 x double>, <2 x double*>, <2 x i1>, i32) + +define void @vpscatter_v2f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double>, <4 x double*>, <4 x i1>, i32) + +define void @vpscatter_v4f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v4f64: +; RV64: # 
%bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: ret + %mhead = insertelement <4 x i1> undef, i1 1, i32 0 + %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer + call void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double>, <8 x double*>, <8 x i1>, i32) + +define void @vpscatter_v8f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i8_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf4 v26, v12 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i8_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf8 v28, v12 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_v8i8_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf8 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_v8i8_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf8 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, 
(a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i8> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i16_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsext.vf2 v26, v12 +; RV32-NEXT: vsll.vi v26, v26, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i16_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf4 v28, v12 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_v8i16_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf4 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_v8i16_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf4 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i16> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_v8i32_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsll.vi v26, v12, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_v8i32_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsext.vf2 v28, v12 +; RV64-NEXT: vsll.vi v28, v28, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vpscatter_baseidx_sext_v8i32_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsext.vf2 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = sext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_v8i32_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vzext.vf2 v28, v12 +; CHECK-NEXT: vsll.vi v28, v28, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %eidxs = zext <8 x i32> %idxs to <8 x i64> + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsll.vi v28, v12, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v28, v0.t +; CHECK-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs + call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -0,0 +1,1816 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare <vscale x 1 x i8> @llvm.vp.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x i8> @vpgather_nxv1i8(<vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <vscale x 1 x i8> @llvm.vp.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x i8> %v +} + +declare <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x i8> @vpgather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x i8> %v +} + +define 
@vpgather_nxv2i8_sextload_nxv2i16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) + %ev = sext %v to + ret %ev +} + +define @vpgather_nxv2i8_zextload_nxv2i16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vzext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vzext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) + %ev = zext %v to + ret %ev +} + +define @vpgather_nxv2i8_sextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) + %ev = sext %v to + ret %ev +} + +define @vpgather_nxv2i8_zextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vzext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vzext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) + %ev = zext %v to + ret %ev +} + +define @vpgather_nxv2i8_sextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsext.vf8 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsext.vf8 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) + %ev = sext %v to + ret %ev +} + +define @vpgather_nxv2i8_zextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { 
+; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vzext.vf8 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vzext.vf8 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) + %ev = zext %v to + ret %ev +} + +declare @llvm.vp.gather.nxv4i8.nxv4p0i8(, , i32) + +define @vpgather_nxv4i8( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4i8.nxv4p0i8( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4i8( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8 +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4i8.nxv4p0i8( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8i8.nxv8p0i8(, , i32) + +define @vpgather_nxv8i8( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8i8.nxv8p0i8( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8(i8* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i8, i8* %base, %idxs + %v = call @llvm.vp.gather.nxv8i8.nxv8p0i8( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv1i16.nxv1p0i16(, , i32) + +define @vpgather_nxv1i16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: 
vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv1i16.nxv1p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv2i16.nxv2p0i16(, , i32) + +define @vpgather_nxv2i16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_nxv2i16_sextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) + %ev = sext %v to + ret %ev +} + +define @vpgather_nxv2i16_zextload_nxv2i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vzext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vzext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) + %ev = zext %v to + ret %ev +} + +define @vpgather_nxv2i16_sextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) + %ev = sext %v to + ret %ev +} + +define @vpgather_nxv2i16_zextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vzext.vf4 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vzext.vf4 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) + %ev = zext %v to + 
ret %ev +} + +declare @llvm.vp.gather.nxv4i16.nxv4p0i16(, , i32) + +define @vpgather_nxv4i16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4i16.nxv4p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4i16( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8 +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4i16.nxv4p0i16( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8i16.nxv8p0i16(, , i32) + +define @vpgather_nxv8i16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8, v0.t +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8, v0.t +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, %idxs + %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i8_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i16, i16* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i8_nxv8i16(i16* %base, 
%idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf4 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf8 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i16, i16* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, %idxs + %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv1i32.nxv1p0i32(, , i32) + +define @vpgather_nxv1i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv1i32.nxv1p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv2i32.nxv2p0i32(, , i32) + +define @vpgather_nxv2i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_nxv2i32_sextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i32_sextload_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i32_sextload_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) + %ev = sext %v to + ret %ev +} + +define @vpgather_nxv2i32_zextload_nxv2i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i32_zextload_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: 
vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vzext.vf2 v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i32_zextload_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vzext.vf2 v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) + %ev = zext %v to + ret %ev +} + +declare @llvm.vp.gather.nxv4i32.nxv4p0i32(, , i32) + +define @vpgather_nxv4i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8, v0.t +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4i32.nxv4p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4i32( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8 +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4i32.nxv4p0i32( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8i32.nxv8p0i32(, , i32) + +define @vpgather_nxv8i32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v28, (zero), v8, v0.t +; RV64-NEXT: vmv4r.v v8, v28 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, %idxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i8_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: 
vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i8_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i16_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, %idxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i16_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i16_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t 
+; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsll.vi v28, v8, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, %idxs + %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv1i64.nxv1p0i64(, , i32) + +define @vpgather_nxv1i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv1i64.nxv1p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv2i64.nxv2p0i64(, , i32) + +define @vpgather_nxv2i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8, v0.t +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2i64.nxv2p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv4i64.nxv4p0i64(, , i32) + +define @vpgather_nxv4i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v28, (zero), v8, v0.t +; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4i64.nxv4p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4i64( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v28, (zero), v8 +; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4i64.nxv4p0i64( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8i64.nxv8p0i64(, , i32) + +define @vpgather_nxv8i64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8i64: 
+; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v8, v16, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf8 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf8 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i16_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v8, v16, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf4 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, 
%idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf4 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsll.vi v28, v8, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v16, v8 +; RV64-NEXT: vsll.vi v8, v16, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf2 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf2 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv1f16.nxv1p0f16(, , i32) + +define @vpgather_nxv1f16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv1f16.nxv1p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv2f16.nxv2p0f16(, , i32) + +define @vpgather_nxv2f16( %ptrs, %m, i32 zeroext %evl) 
{ +; RV32-LABEL: vpgather_nxv2f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2f16.nxv2p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv4f16.nxv4p0f16(, , i32) + +define @vpgather_nxv4f16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4f16.nxv4p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4f16( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8 +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8 +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4f16.nxv4p0f16( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8f16.nxv8p0f16(, , i32) + +define @vpgather_nxv8f16( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8, v0.t +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8, v0.t +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds half, half* %base, %idxs + %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i8_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16: +; 
RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds half, half* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i8_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf4 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf8 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds half, half* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds half, half* %base, %idxs + %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv1f32.nxv1p0f32(, , i32) + +define @vpgather_nxv1f32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv1f32.nxv1p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv2f32.nxv2p0f32(, , i32) + +define @vpgather_nxv2f32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vluxei64.v v25, (zero), v8, v0.t +; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2f32.nxv2p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv4f32.nxv4p0f32(, , i32) + +define @vpgather_nxv4f32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; 
RV64-NEXT: vluxei64.v v26, (zero), v8, v0.t +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4f32.nxv4p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4f32( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vluxei64.v v26, (zero), v8 +; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4f32.nxv4p0f32( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8f32.nxv8p0f32(, , i32) + +define @vpgather_nxv8f32( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v28, (zero), v8, v0.t +; RV64-NEXT: vmv4r.v v8, v28 +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, %idxs + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i8_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds float, float* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i8_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf8 v16, v8 +; 
RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds float, float* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i16_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, %idxs + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i16_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds float, float* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i16_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds float, float* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8f32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsll.vi v28, v8, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8f32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v16, v8 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds float, float* %base, %idxs + %v = call 
@llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv1f64.nxv1p0f64(, , i32) + +define @vpgather_nxv1f64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vluxei32.v v25, (zero), v8, v0.t +; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv1f64.nxv1p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv2f64.nxv2p0f64(, , i32) + +define @vpgather_nxv2f64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vluxei32.v v26, (zero), v8, v0.t +; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv2f64.nxv2p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv4f64.nxv4p0f64(, , i32) + +define @vpgather_nxv4f64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v28, (zero), v8, v0.t +; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv4f64.nxv4p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_truemask_nxv4f64( %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpgather_truemask_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vluxei32.v v28, (zero), v8 +; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_truemask_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + %v = call @llvm.vp.gather.nxv4f64.nxv4p0f64( %ptrs, %mtrue, i32 %evl) + ret %v +} + +declare @llvm.vp.gather.nxv8f64.nxv8p0f64(, , i32) + +define @vpgather_nxv8f64( %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t +; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v8 +; RV64-NEXT: vsll.vi v8, v16, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t 
+; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, %idxs + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf8 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds double, double* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf8 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds double, double* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v8 +; RV32-NEXT: vsll.vi v28, v28, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v8 +; RV64-NEXT: vsll.vi v8, v16, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds double, double* %base, %idxs + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf4 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds double, double* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf4 v16, v8 +; CHECK-NEXT: vsll.vi v8, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds double, double* %base, %eidxs + %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) + ret %v +} + +define @vpgather_baseidx_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsll.vi v28, v8, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vluxei32.v v8, 
(a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf2 v16, v8
+; RV64-NEXT: vsll.vi v8, v16, 3
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i32> %idxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsll.vi v8, v16, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT: ret
+  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vzext.vf2 v16, v8
+; CHECK-NEXT: vsll.vi v8, v16, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT: ret
+  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_nxv8f64(double* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsll.vi v8, v8, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT: ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %idxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -0,0 +1,1656 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare void @llvm.vp.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8>, <vscale x 1 x i8*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv1i8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8> %val, <vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL:
vpscatter_nxv2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_nxv2i16_truncstore_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v10, v0.t +; RV64-NEXT: ret + %tval = trunc %val to + call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_nxv2i32_truncstore_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v10, v0.t +; RV64-NEXT: ret + %tval = trunc %val to + call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_nxv2i64_truncstore_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v10, v0.t +; RV64-NEXT: ret + %tval = trunc %val to + call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv4i8.nxv4p0i8(, , , i32) + +define void @vpscatter_nxv4i8( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_nxv4i8( 
%val, %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_nxv4i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_nxv4i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + call void @llvm.vp.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv8i8.nxv8p0i8(, , , i32) + +define void @vpscatter_nxv8i8( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i8( %val, i8* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i8: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v9 +; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i8: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v9 +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i8, i8* %base, %idxs + call void @llvm.vp.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv1i16.nxv1p0i16(, , , i32) + +define void @vpscatter_nxv1i16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv1i16.nxv1p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv2i16.nxv2p0i16(, , , i32) + +define void @vpscatter_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_nxv2i32_truncstore_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v25, 
(zero), v10, v0.t +; RV64-NEXT: ret + %tval = trunc %val to + call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %tval, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_nxv2i64_truncstore_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vnsrl.wi v25, v25, 0 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vnsrl.wi v25, v25, 0 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v10, v0.t +; RV64-NEXT: ret + %tval = trunc %val to + call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %tval, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv4i16.nxv4p0i16(, , , i32) + +define void @vpscatter_nxv4i16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_nxv4i16( %val, %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + call void @llvm.vp.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv8i16.nxv8p0i16(, , , i32) + +define void @vpscatter_nxv8i16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v10 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v10 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, %idxs + call void 
@llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v10 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v10 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i16, i16* %base, %eidxs + call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf4 v28, v10 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf8 v16, v10 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i16, i16* %base, %eidxs + call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v10 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v10 +; RV64-NEXT: vadd.vv v16, v16, v16 +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i16, i16* %base, %idxs + call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv1i32.nxv1p0i32(, , , i32) + +define void @vpscatter_nxv1i32( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv1i32.nxv1p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv2i32.nxv2p0i32(, , , i32) + +define void @vpscatter_nxv2i32( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i32: +; RV64: # 
%bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv2i32.nxv2p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_nxv2i64_truncstore_nxv2i32( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vnsrl.wi v25, v8, 0 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsoxei32.v v25, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vnsrl.wi v25, v8, 0 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsoxei64.v v25, (zero), v10, v0.t +; RV64-NEXT: ret + %tval = trunc %val to + call void @llvm.vp.scatter.nxv2i32.nxv2p0i32( %tval, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv4i32.nxv4p0i32(, , , i32) + +define void @vpscatter_nxv4i32( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_nxv4i32( %val, %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + call void @llvm.vp.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv8i32.nxv8p0i32(, , , i32) + +define void @vpscatter_nxv8i32( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v12 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, %idxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void 
@vpscatter_baseidx_sext_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v12 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf4 v28, v12 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf8 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v12 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, %idxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v12 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; 
RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vzext.vf2 v28, v12 +; RV32-NEXT: vsll.vi v28, v28, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vzext.vf4 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i32, i32* %base, %eidxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsll.vi v28, v12, 2 +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v16, v12 +; RV64-NEXT: vsll.vi v16, v16, 2 +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i32, i32* %base, %idxs + call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv1i64.nxv1p0i64(, , , i32) + +define void @vpscatter_nxv1i64( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv1i64.nxv1p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv2i64.nxv2p0i64(, , , i32) + +define void @vpscatter_nxv2i64( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv2i64.nxv2p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv4i64.nxv4p0i64(, , , i32) + +define void @vpscatter_nxv4i64( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_nxv4i64( %val, %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, 
e64, m4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + call void @llvm.vp.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv8i64.nxv8p0i64(, , , i32) + +define void @vpscatter_nxv8i64( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v16 +; RV32-NEXT: vsll.vi v28, v28, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 v24, v16 +; RV64-NEXT: vsll.vi v16, v24, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf8 v24, v16 +; CHECK-NEXT: vsll.vi v16, v24, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf8 v24, v16 +; CHECK-NEXT: vsll.vi v16, v24, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf2 v28, v16 +; RV32-NEXT: vsll.vi v28, v28, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf4 v24, v16 +; RV64-NEXT: vsll.vi v16, v24, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* 
%base, %idxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf4 v24, v16 +; CHECK-NEXT: vsll.vi v16, v24, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf4 v24, v16 +; CHECK-NEXT: vsll.vi v16, v24, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsll.vi v28, v16, 3 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf2 v24, v16 +; RV64-NEXT: vsll.vi v16, v24, 3 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsext.vf2 v24, v16 +; CHECK-NEXT: vsll.vi v16, v24, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret + %eidxs = sext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vzext.vf2 v24, v16 +; CHECK-NEXT: vsll.vi v16, v24, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret + %eidxs = zext %idxs to + %ptrs = getelementptr inbounds i64, i64* %base, %eidxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { +; CHECK-LABEL: vpscatter_baseidx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsll.vi v16, v16, 3 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsoxei64.v v8, 
(a0), v16, v0.t +; CHECK-NEXT: ret + %ptrs = getelementptr inbounds i64, i64* %base, %idxs + call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv1f16.nxv1p0f16(, , , i32) + +define void @vpscatter_nxv1f16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv1f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv1f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv1f16.nxv1p0f16( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv2f16.nxv2p0f16(, , , i32) + +define void @vpscatter_nxv2f16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv2f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv2f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv2f16.nxv2p0f16( %val, %ptrs, %m, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv4f16.nxv4p0f16(, , , i32) + +define void @vpscatter_nxv4f16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_truemask_nxv4f16( %val, %ptrs, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_truemask_nxv4f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_truemask_nxv4f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: ret + %mhead = insertelement undef, i1 1, i32 0 + %mtrue = shufflevector %mhead, undef, zeroinitializer + call void @llvm.vp.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, %mtrue, i32 %evl) + ret void +} + +declare void @llvm.vp.scatter.nxv8f16.nxv8p0f16(, , , i32) + +define void @vpscatter_nxv8f16( %val, %ptrs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: ret + call void @llvm.vp.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, %m, i32 %evl) + ret void +} + +define void @vpscatter_baseidx_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m, i32 zeroext %evl) { +; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsext.vf4 v28, v10 +; RV32-NEXT: vadd.vv v28, v28, v28 +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsext.vf8 
v16, v10
+; RV64-NEXT: vadd.vv v16, v16, v16
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf4 v28, v10
+; RV32-NEXT: vadd.vv v28, v28, v28
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf8 v16, v10
+; RV64-NEXT: vadd.vv v16, v16, v16
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vzext.vf4 v28, v10
+; RV32-NEXT: vadd.vv v28, v28, v28
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vzext.vf8 v16, v10
+; RV64-NEXT: vadd.vv v16, v16, v16
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf2 v28, v10
+; RV32-NEXT: vadd.vv v28, v28, v28
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8f16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf4 v16, v10
+; RV64-NEXT: vadd.vv v16, v16, v16
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1f32.nxv1p0f32(<vscale x 1 x float>, <vscale x 1 x float*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv1f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv1f32.nxv1p0f32(<vscale x 1 x float> %val, <vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2f32.nxv2p0f32(<vscale x 2 x float>, <vscale x 2 x float*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv2f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv2f32.nxv2p0f32(<vscale x 2 x float> %val, <vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float>, <vscale x 4 x float*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv4f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v12
+; RV64-NEXT: ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float>, <vscale x 8 x float*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf4 v28, v12
+; RV32-NEXT: vsll.vi v28, v28, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf8 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf4 v28, v12
+; RV32-NEXT: vsll.vi v28, v28, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf8 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vzext.vf4 v28, v12
+; RV32-NEXT: vsll.vi v28, v28, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vzext.vf8 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf2 v28, v12
+; RV32-NEXT: vsll.vi v28, v28, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf4 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf2 v28, v12
+; RV32-NEXT: vsll.vi v28, v28, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf4 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vzext.vf2 v28, v12
+; RV32-NEXT: vsll.vi v28, v28, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vzext.vf4 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8f32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsll.vi v28, v12, 2
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8f32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf2 v16, v12
+; RV64-NEXT: vsll.vi v16, v16, 2
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %idxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1f64.nxv1p0f64(<vscale x 1 x double>, <vscale x 1 x double*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv1f64.nxv1p0f64(<vscale x 1 x double> %val, <vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2f64.nxv2p0f64(<vscale x 2 x double>, <vscale x 2 x double*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv2f64.nxv2p0f64(<vscale x 2 x double> %val, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double>, <vscale x 4 x double*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v12
+; RV64-NEXT: ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double>, <vscale x 8 x double*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT: ret
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf4 v28, v16
+; RV32-NEXT: vsll.vi v28, v28, 3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf8 v24, v16
+; RV64-NEXT: vsll.vi v16, v24, 3
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsext.vf8 v24, v16
+; CHECK-NEXT: vsll.vi v16, v24, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vzext.vf8 v24, v16
+; CHECK-NEXT: vsll.vi v16, v24, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsext.vf2 v28, v16
+; RV32-NEXT: vsll.vi v28, v28, 3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf4 v24, v16
+; RV64-NEXT: vsll.vi v16, v24, 3
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsext.vf4 v24, v16
+; CHECK-NEXT: vsll.vi v16, v24, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vzext.vf4 v24, v16
+; CHECK-NEXT: vsll.vi v16, v24, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT: vsll.vi v28, v16, 3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf2 v24, v16
+; RV64-NEXT: vsll.vi v16, v24, 3
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i32> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsext.vf2 v24, v16
+; CHECK-NEXT: vsll.vi v16, v24, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vzext.vf2 v24, v16
+; CHECK-NEXT: vsll.vi v16, v24, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsll.vi v16, v16, 3
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}