diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -457,6 +457,7 @@
   case ISD::USHLSAT:
   case ISD::FP_TO_SINT_SAT:
   case ISD::FP_TO_UINT_SAT:
+  case ISD::MGATHER:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1204,6 +1204,8 @@
                     MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
       setOperationAction(ISD::MLOAD, VT, Custom);
       setOperationAction(ISD::MSTORE, VT, Custom);
+      setOperationAction(ISD::MGATHER, VT, Custom);
+      setOperationAction(ISD::MSCATTER, VT, Custom);
     }
   }
@@ -1498,7 +1500,9 @@
     setOperationAction(ISD::FSUB, VT, Custom);
     setOperationAction(ISD::FTRUNC, VT, Custom);
     setOperationAction(ISD::LOAD, VT, Custom);
+    setOperationAction(ISD::MGATHER, VT, Custom);
     setOperationAction(ISD::MLOAD, VT, Custom);
+    setOperationAction(ISD::MSCATTER, VT, Custom);
     setOperationAction(ISD::MSTORE, VT, Custom);
     setOperationAction(ISD::MUL, VT, Custom);
     setOperationAction(ISD::MULHS, VT, Custom);
@@ -4214,6 +4218,10 @@
   if (!isNullConstant(BasePtr))
     return;
 
+  // FIXME: This will not match for fixed vector type codegen as the nodes in
+  // question will have fixed<->scalable conversions around them. This should
+  // be moved to a DAG combine or complex pattern, after all of the fixed
+  // vector inserts and extracts have been removed.
   ConstantSDNode *Offset = nullptr;
   if (Index.getOpcode() == ISD::ADD)
     if (auto SplatVal = DAG.getSplatValue(Index.getOperand(1))) {
@@ -4252,115 +4260,6 @@
   Index = ConstOffset;
 }
 
-SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
-                                            SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
-  assert(MGT && "Can only custom lower gather load nodes");
-
-  SDValue Index = MGT->getIndex();
-  SDValue Chain = MGT->getChain();
-  SDValue PassThru = MGT->getPassThru();
-  SDValue Mask = MGT->getMask();
-  SDValue BasePtr = MGT->getBasePtr();
-  ISD::LoadExtType ExtTy = MGT->getExtensionType();
-
-  ISD::MemIndexType IndexType = MGT->getIndexType();
-  bool IsScaled =
-      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED;
-  bool IsSigned =
-      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED;
-  bool IdxNeedsExtend =
-      getGatherScatterIndexIsExtended(Index) ||
-      Index.getSimpleValueType().getVectorElementType() == MVT::i32;
-  bool ResNeedsSignExtend = ExtTy == ISD::EXTLOAD || ExtTy == ISD::SEXTLOAD;
-
-  EVT VT = PassThru.getSimpleValueType();
-  EVT MemVT = MGT->getMemoryVT();
-  SDValue InputVT = DAG.getValueType(MemVT);
-
-  if (VT.getVectorElementType() == MVT::bf16 &&
-      !static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16())
-    return SDValue();
-
-  // Handle FP data by using an integer gather and casting the result.
-  if (VT.isFloatingPoint()) {
-    EVT PassThruVT = getPackedSVEVectorVT(VT.getVectorElementCount());
-    PassThru = getSVESafeBitCast(PassThruVT, PassThru, DAG);
-    InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
-  }
-
-  SDVTList VTs = DAG.getVTList(PassThru.getSimpleValueType(), MVT::Other);
-
-  if (getGatherScatterIndexIsExtended(Index))
-    Index = Index.getOperand(0);
-
-  unsigned Opcode = getGatherVecOpcode(IsScaled, IsSigned, IdxNeedsExtend);
-  selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
-                              /*isGather=*/true, DAG);
-
-  if (ResNeedsSignExtend)
-    Opcode = getSignExtendedGatherOpcode(Opcode);
-
-  SDValue Ops[] = {Chain, Mask, BasePtr, Index, InputVT, PassThru};
-  SDValue Gather = DAG.getNode(Opcode, DL, VTs, Ops);
-
-  if (VT.isFloatingPoint()) {
-    SDValue Cast = getSVESafeBitCast(VT, Gather, DAG);
-    return DAG.getMergeValues({Cast, Gather.getValue(1)}, DL);
-  }
-
-  return Gather;
-}
-
-SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
-                                             SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
-  assert(MSC && "Can only custom lower scatter store nodes");
-
-  SDValue Index = MSC->getIndex();
-  SDValue Chain = MSC->getChain();
-  SDValue StoreVal = MSC->getValue();
-  SDValue Mask = MSC->getMask();
-  SDValue BasePtr = MSC->getBasePtr();
-
-  ISD::MemIndexType IndexType = MSC->getIndexType();
-  bool IsScaled =
-      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED;
-  bool IsSigned =
-      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED;
-  bool NeedsExtend =
-      getGatherScatterIndexIsExtended(Index) ||
-      Index.getSimpleValueType().getVectorElementType() == MVT::i32;
-
-  EVT VT = StoreVal.getSimpleValueType();
-  SDVTList VTs = DAG.getVTList(MVT::Other);
-  EVT MemVT = MSC->getMemoryVT();
-  SDValue InputVT = DAG.getValueType(MemVT);
-
-  if (VT.getVectorElementType() == MVT::bf16 &&
-      !static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16())
-    return SDValue();
-
-  // Handle FP data by casting the data so an integer scatter can be used.
-  if (VT.isFloatingPoint()) {
-    EVT StoreValVT = getPackedSVEVectorVT(VT.getVectorElementCount());
-    StoreVal = getSVESafeBitCast(StoreValVT, StoreVal, DAG);
-    InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
-  }
-
-  if (getGatherScatterIndexIsExtended(Index))
-    Index = Index.getOperand(0);
-
-  unsigned Opcode = getScatterVecOpcode(IsScaled, IsSigned, NeedsExtend);
-  selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
-                              /*isGather=*/false, DAG);
-
-  SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, InputVT};
-  return DAG.getNode(Opcode, DL, VTs, Ops);
-}
-
 // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
 static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
                                         EVT VT, EVT MemVT,
@@ -15402,6 +15301,27 @@
   return SDValue();
 }
 
+static SDValue performSetccMergeZeroCombine(SDNode *N, SelectionDAG &DAG) {
+  assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
+         "Unexpected opcode!");
+
+  SDValue Pred = N->getOperand(0);
+  SDValue LHS = N->getOperand(1);
+  SDValue RHS = N->getOperand(2);
+  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
+
+  // setcc_merge_zero pred (sign_extend (setcc_merge_zero ... pred ...)), 0, ne
+  //   => inner setcc_merge_zero
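+  //
+  // For example (illustrative types):
+  //   t0: nxv16i1 = setcc_merge_zero Pred, A, B, CC
+  //   t1: nxv16i8 = sign_extend t0
+  //   t2: nxv16i1 = setcc_merge_zero Pred, t1, zeros, ne
+  // Under the same predicate, t1 is all-ones exactly in the lanes where t0
+  // is true, so t2 computes the same value as t0 and can be replaced by it.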
+  if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
+      LHS->getOpcode() == ISD::SIGN_EXTEND &&
+      LHS->getOperand(0)->getValueType(0) == N->getValueType(0) &&
+      LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
+      LHS->getOperand(0)->getOperand(0) == Pred)
+    return LHS->getOperand(0);
+
+  return SDValue();
+}
+
 // Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test
 // as well as whether the test should be inverted. This code is required to
 // catch these cases (as opposed to standard dag combines) because
@@ -16262,6 +16182,8 @@
     return performSpliceCombine(N, DAG);
   case AArch64ISD::UZP1:
     return performUzpCombine(N, DAG);
+  case AArch64ISD::SETCC_MERGE_ZERO:
+    return performSetccMergeZeroCombine(N, DAG);
   case AArch64ISD::GLD1_MERGE_ZERO:
   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
@@ -17883,6 +17805,181 @@
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
 }
 
+SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
+  assert(MGT && "Can only custom lower gather load nodes");
+
+  bool IsFixedLength = MGT->getMemoryVT().isFixedLengthVector();
+
+  SDValue Index = MGT->getIndex();
+  SDValue Chain = MGT->getChain();
+  SDValue PassThru = MGT->getPassThru();
+  SDValue Mask = MGT->getMask();
+  SDValue BasePtr = MGT->getBasePtr();
+  ISD::LoadExtType ExtTy = MGT->getExtensionType();
+
+  ISD::MemIndexType IndexType = MGT->getIndexType();
+  bool IsScaled =
+      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED;
+  bool IsSigned =
+      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED;
+  bool IdxNeedsExtend =
+      getGatherScatterIndexIsExtended(Index) ||
+      Index.getSimpleValueType().getVectorElementType() == MVT::i32;
+  bool ResNeedsSignExtend = ExtTy == ISD::EXTLOAD || ExtTy == ISD::SEXTLOAD;
+
+  EVT VT = PassThru.getSimpleValueType();
+  EVT IndexVT = Index.getSimpleValueType();
+  EVT MemVT = MGT->getMemoryVT();
+  SDValue InputVT = DAG.getValueType(MemVT);
+
+  if (VT.getVectorElementType() == MVT::bf16 &&
+      !static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16())
+    return SDValue();
+
+  if (IsFixedLength) {
+    IndexVT = getContainerForFixedLengthVector(DAG, IndexVT);
+    MemVT = IndexVT.changeVectorElementType(MemVT.getVectorElementType());
+    InputVT = DAG.getValueType(MemVT.changeTypeToInteger());
+  }
+
+  bool IsPassThruZeroOrUndef = false;
+  if (PassThru->isUndef()) {
+    PassThru = DAG.getUNDEF(IndexVT);
+    IsPassThruZeroOrUndef = true;
+  } else {
+    if (isZerosVector(PassThru.getNode())) {
+      if (IndexVT.isInteger())
+        PassThru = DAG.getConstant(0, DL, IndexVT);
+      else
+        PassThru = DAG.getConstantFP(0, DL, IndexVT);
+      IsPassThruZeroOrUndef = true;
+    }
+  }
+
+  if (VT.isFloatingPoint() && !IsFixedLength) {
+    // Handle FP data by using an integer gather and casting the result.
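+    // For example, an nxv2f64 gather is performed as an nxv2i64 gather whose
+    // result is bitcast back to nxv2f64; the loaded bytes are the same, only
+    // the register interpretation changes.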
+    EVT PassThruVT = getPackedSVEVectorVT(VT.getVectorElementCount());
+    PassThru = getSVESafeBitCast(PassThruVT, PassThru, DAG);
+    InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
+  }
+
+  SDVTList VTs = DAG.getVTList(IndexVT, MVT::Other);
+
+  if (getGatherScatterIndexIsExtended(Index))
+    Index = Index.getOperand(0);
+
+  unsigned Opcode = getGatherVecOpcode(IsScaled, IsSigned, IdxNeedsExtend);
+  selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
+                              /*isGather=*/true, DAG);
+
+  if (ResNeedsSignExtend)
+    Opcode = getSignExtendedGatherOpcode(Opcode);
+
+  if (IsFixedLength) {
+    if (Index.getSimpleValueType().isFixedLengthVector())
+      Index = convertToScalableVector(DAG, IndexVT, Index);
+    if (BasePtr.getSimpleValueType().isFixedLengthVector())
+      BasePtr = convertToScalableVector(DAG, IndexVT, BasePtr);
+    Mask = convertFixedMaskToScalableVector(Mask, DAG);
+  }
+
+  SDValue Ops[] = {Chain, Mask, BasePtr, Index, InputVT, PassThru};
+  SDValue Result = DAG.getNode(Opcode, DL, VTs, Ops);
+  Chain = Result.getValue(1);
+
+  if (IsFixedLength) {
+    Result = convertFromScalableVector(
+        DAG, VT.changeVectorElementType(IndexVT.getVectorElementType()),
+        Result);
+    Result = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Result);
+    Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
+
+    if (!IsPassThruZeroOrUndef)
+      Result = DAG.getSelect(DL, VT, MGT->getMask(), Result, PassThru);
+  } else {
+    if (!IsPassThruZeroOrUndef)
+      Result = DAG.getSelect(DL, IndexVT, Mask, Result, PassThru);
+
+    if (VT.isFloatingPoint())
+      Result = getSVESafeBitCast(VT, Result, DAG);
+  }
+
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
+SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
+                                             SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
+  assert(MSC && "Can only custom lower scatter store nodes");
+
+  bool IsFixedLength = MSC->getMemoryVT().isFixedLengthVector();
+
+  SDValue Index = MSC->getIndex();
+  SDValue Chain = MSC->getChain();
+  SDValue StoreVal = MSC->getValue();
+  SDValue Mask = MSC->getMask();
+  SDValue BasePtr = MSC->getBasePtr();
+
+  ISD::MemIndexType IndexType = MSC->getIndexType();
+  bool IsScaled =
+      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED;
+  bool IsSigned =
+      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED;
+  bool NeedsExtend =
+      getGatherScatterIndexIsExtended(Index) ||
+      Index.getSimpleValueType().getVectorElementType() == MVT::i32;
+
+  EVT VT = StoreVal.getSimpleValueType();
+  EVT IndexVT = Index.getSimpleValueType();
+  SDVTList VTs = DAG.getVTList(MVT::Other);
+  EVT MemVT = MSC->getMemoryVT();
+  SDValue InputVT = DAG.getValueType(MemVT);
+
+  if (VT.getVectorElementType() == MVT::bf16 &&
+      !static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16())
+    return SDValue();
+
+  if (IsFixedLength) {
+    IndexVT = getContainerForFixedLengthVector(DAG, IndexVT);
+    MemVT = IndexVT.changeVectorElementType(MemVT.getVectorElementType());
+    InputVT = DAG.getValueType(MemVT.changeTypeToInteger());
+
+    StoreVal =
+        DAG.getNode(ISD::BITCAST, DL, VT.changeTypeToInteger(), StoreVal);
+    StoreVal = DAG.getNode(
+        ISD::ANY_EXTEND, DL,
+        VT.changeVectorElementType(IndexVT.getVectorElementType()), StoreVal);
+    StoreVal = convertToScalableVector(DAG, IndexVT, StoreVal);
+  } else if (VT.isFloatingPoint()) {
+    // Handle FP data by casting the data so an integer scatter can be used.
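+    // As in LowerMGATHER, e.g. an nxv4f32 scatter is emitted as an nxv4i32
+    // scatter of the bitcast data; the stored bytes are identical.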
+    EVT StoreValVT = getPackedSVEVectorVT(VT.getVectorElementCount());
+    StoreVal = getSVESafeBitCast(StoreValVT, StoreVal, DAG);
+    InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
+  }
+
+  if (getGatherScatterIndexIsExtended(Index))
+    Index = Index.getOperand(0);
+
+  unsigned Opcode = getScatterVecOpcode(IsScaled, IsSigned, NeedsExtend);
+  selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
+                              /*isGather=*/false, DAG);
+
+  if (IsFixedLength) {
+    if (Index.getSimpleValueType().isFixedLengthVector())
+      Index = convertToScalableVector(DAG, IndexVT, Index);
+    if (BasePtr.getSimpleValueType().isFixedLengthVector())
+      BasePtr = convertToScalableVector(DAG, IndexVT, BasePtr);
+    Mask = convertFixedMaskToScalableVector(Mask, DAG);
+  }
+
+  SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, InputVT};
+  return DAG.getNode(Opcode, DL, VTs, Ops);
+}
+
 SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
                                                        SelectionDAG &DAG) const {
   SDLoc DL(ReduceOp);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -243,9 +243,15 @@
   }
 
   bool isLegalMaskedGatherScatter(Type *DataType) const {
-    if (isa<FixedVectorType>(DataType) || !ST->hasSVE())
+    if (!ST->hasSVE())
       return false;
 
+    // For fixed vectors, avoid scalarization if using SVE for them.
+    auto *DataTypeFVTy = dyn_cast<FixedVectorType>(DataType);
+    if (DataTypeFVTy && (!ST->useSVEForFixedLengthVectors() ||
+                         DataTypeFVTy->getNumElements() < 2))
+      return false; // Fall back to scalarization of masked operations.
+
     return isLegalElementTypeForSVE(DataType->getScalarType());
  }
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -0,0 +1,1062 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128 -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=384 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512
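For reference, the shape of gather this hook now reports as legal (the call is
taken verbatim from the new test below; previously all fixed-length masked
gathers were scalarized):

  %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptrs, i32 8, <8 x i1> %mask, <8 x i32> undef)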
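+; The VBITS_GE_* check prefixes are cumulative: a RUN line enables every
+; prefix whose value is less than or equal to its -aarch64-sve-vector-bits-min
+; setting, so each test is written against the smallest width that suits it.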
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Don't use SVE when its registers are no bigger than NEON.
+; NO_SVE-NOT: ptrue
+
+;
+; LD1B
+;
+
+define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2i8:
+; CHECK: ldrb [[VALS_LO:w[0-9]+]], [x0]
+; CHECK-NEXT: ldrb [[VALS_HI:w[0-9]+]], [x0, #1]
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
+; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
+; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0
+; CHECK-NEXT: ld1sb { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
+; CHECK-NEXT: mov [[RES_HI:w[0-9]+]], v[[XTN]].s[1]
+; CHECK-NEXT: fmov [[RES_LO:w[0-9]+]], s[[XTN]]
+; CHECK-NEXT: strb [[RES_LO]], [x0]
+; CHECK-NEXT: strb [[RES_HI]], [x0, #1]
+; CHECK-NEXT: ret
+  %cval = load <2 x i8>, <2 x i8>* %a
+  %ptrs = load <2 x i8*>, <2 x i8*>* %b
+  %mask = icmp eq <2 x i8> %cval, zeroinitializer
+  %vals = call <2 x i8> @llvm.masked.gather.v2i8(<2 x i8*> %ptrs, i32 8, <2 x i1> %mask, <2 x i8> undef)
+  store <2 x i8> %vals, <2 x i8>* %a
+  ret void
+}
+
+define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4i8:
+; CHECK: ldrb [[VALS_LO1:w[0-9]+]], [x0]
+; CHECK-NEXT: ldrb [[VALS_LO2:w[0-9]+]], [x0, #1]
+; CHECK-NEXT: ldrb [[VALS_HI1:w[0-9]+]], [x0, #2]
+; CHECK-NEXT: ldrb [[VALS_HI2:w[0-9]+]], [x0, #3]
+; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO1]]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: mov v[[VALS]].h[1], [[VALS_LO2]]
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: mov v[[VALS]].h[2], [[VALS_HI1]]
+; CHECK-NEXT: mov v[[VALS]].h[3], [[VALS_HI2]]
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0
+; CHECK-NEXT: ld1sb { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; CHECK-NEXT: uzp1 v[[UZP3:[0-9]+]].8b, v[[UZP2]].8b, v[[UZP2]].8b
+; CHECK-NEXT: str s[[UZP3]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x i8>, <4 x i8>* %a
+  %ptrs = load <4 x i8*>, <4 x i8*>* %b
+  %mask = icmp eq <4 x i8> %cval, zeroinitializer
+  %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x i8*> %ptrs, i32 8, <4 x i1> %mask, <4 x i8> undef)
+  store <4 x i8> %vals, <4 x i8>* %a
+  ret void
+}
+
+define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8i8:
+; VBITS_GE_512: ldr d[[VALS:[0-9]+]], [x0]
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].b, vl8
+; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8b, v[[VALS]].8b, #0
+; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].b, [[PG1]]/z, z[[CMP]].b, #0
+; VBITS_GE_512-NEXT: ld1b { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_512-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_512-NEXT: uzp1 z[[UZP3:[0-9]+]].b, [[UZP2]].b, [[UZP2]].b
+; VBITS_GE_512-NEXT: str d[[UZP3]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x i8>, <8 x i8>* %a
+  %ptrs = load <8 x i8*>, <8 x i8*>* %b
+  %mask = icmp eq <8 x i8> %cval, zeroinitializer
+  %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x i8*> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef)
+  store <8 x i8> %vals, <8 x i8>* %a
+  ret void
+}
+
+define void @masked_gather_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16i8:
+; VBITS_GE_1024: ldr q[[VALS:[0-9]+]], [x0]
+; VBITS_GE_1024-NEXT: ptrue [[PG0:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].b, vl16
+; VBITS_GE_1024-NEXT: cmeq v[[CMP:[0-9]+]].16b, v[[VALS]].16b, #0
+; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].b, [[PG1]]/z, z[[CMP]].b, #0
+; VBITS_GE_1024-NEXT: ld1b { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_1024-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_1024-NEXT: uzp1 z[[UZP3:[0-9]+]].b, [[UZP2]].b, [[UZP2]].b
+; VBITS_GE_1024-NEXT: str q[[UZP3]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x i8>, <16 x i8>* %a
+  %ptrs = load <16 x i8*>, <16 x i8*>* %b
+  %mask = icmp eq <16 x i8> %cval, zeroinitializer
+  %vals = call <16 x i8> @llvm.masked.gather.v16i8(<16 x i8*> %ptrs, i32 8, <16 x i1> %mask, <16 x i8> undef)
+  store <16 x i8> %vals, <16 x i8>* %a
+  ret void
+}
+
+define void @masked_gather_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32i8:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].b, vl32
+; VBITS_GE_2048-NEXT: ld1b { [[VALS:z[0-9]+]].b }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].b, [[PG0]]/z, [[VALS]].b, #0
+; VBITS_GE_2048-NEXT: ld1b { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_2048-NEXT: uzp1 [[UZP3:z[0-9]+]].b, [[UZP2]].b, [[UZP2]].b
+; VBITS_GE_2048-NEXT: st1b { [[UZP3]].b }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x i8>, <32 x i8>* %a
+  %ptrs = load <32 x i8*>, <32 x i8*>* %b
+  %mask = icmp eq <32 x i8> %cval, zeroinitializer
+  %vals = call <32 x i8> @llvm.masked.gather.v32i8(<32 x i8*> %ptrs, i32 8, <32 x i1> %mask, <32 x i8> undef)
+  store <32 x i8> %vals, <32 x i8>* %a
+  ret void
+}
+
+;
+; LD1H
+;
+
+define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2i16:
+; CHECK: ldrh [[VALS_LO:w[0-9]+]], [x0]
+; CHECK-NEXT: ldrh [[VALS_HI:w[0-9]+]], [x0, #2]
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
+; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
+; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0
+; CHECK-NEXT: ld1sh { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
+; CHECK-NEXT: mov [[RES_HI:w[0-9]+]], v[[XTN]].s[1]
+; CHECK-NEXT: fmov [[RES_LO:w[0-9]+]], s[[XTN]]
+; CHECK-NEXT: strh [[RES_LO]], [x0]
+; CHECK-NEXT: strh [[RES_HI]], [x0, #2]
+; CHECK-NEXT: ret
+  %cval = load <2 x i16>, <2 x i16>* %a
+  %ptrs = load <2 x i16*>, <2 x i16*>* %b
+  %mask = icmp eq <2 x i16> %cval, zeroinitializer
+  %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x i16*> %ptrs, i32 8, <2 x i1> %mask, <2 x i16> undef)
+  store <2 x i16> %vals, <2 x i16>* %a
+  ret void
+}
+
+define void @masked_gather_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4i16:
+; CHECK: ldr d[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0
+; CHECK-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; CHECK-NEXT: str d[[UZP2]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x i16>, <4 x i16>* %a
+  %ptrs = load <4 x i16*>, <4 x i16*>* %b
+  %mask = icmp eq <4 x i16> %cval, zeroinitializer
+  %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x i16*> %ptrs, i32 8, <4 x i1> %mask, <4 x i16> undef)
+  store <4 x i16> %vals, <4 x i16>* %a
+  ret void
+}
+
+define void @masked_gather_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8i16:
+; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].h, vl8
+; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
+; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0
+; VBITS_GE_512-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_512-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_512-NEXT: str q[[UZP2]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x i16>, <8 x i16>* %a
+  %ptrs = load <8 x i16*>, <8 x i16*>* %b
+  %mask = icmp eq <8 x i16> %cval, zeroinitializer
+  %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x i16*> %ptrs, i32 8, <8 x i1> %mask, <8 x i16> undef)
+  store <8 x i16> %vals, <8 x i16>* %a
+  ret void
+}
+
+define void @masked_gather_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16i16:
+; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
+; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
+; VBITS_GE_1024-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_1024-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_1024-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x i16>, <16 x i16>* %a
+  %ptrs = load <16 x i16*>, <16 x i16*>* %b
+  %mask = icmp eq <16 x i16> %cval, zeroinitializer
+  %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x i16*> %ptrs, i32 8, <16 x i1> %mask, <16 x i16> undef)
+  store <16 x i16> %vals, <16 x i16>* %a
+  ret void
+}
+
+define void @masked_gather_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32i16:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
+; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x i16>, <32 x i16>* %a
+  %ptrs = load <32 x i16*>, <32 x i16*>* %b
+  %mask = icmp eq <32 x i16> %cval, zeroinitializer
+  %vals = call <32 x i16> @llvm.masked.gather.v32i16(<32 x i16*> %ptrs, i32 8, <32 x i1> %mask, <32 x i16> undef)
+  store <32 x i16> %vals, <32 x i16>* %a
+  ret void
+}
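+
+; As the tests above show, gathers of elements narrower than 64 bits are
+; performed with the .d-form gather instruction and the result is narrowed
+; back down with a chain of uzp1 instructions.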
+
+;
+; LD1W
+;
+
+define void @masked_gather_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2i32:
+; CHECK: ldr d[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0
+; CHECK-NEXT: ld1w { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
+; CHECK-NEXT: str d[[XTN]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <2 x i32>, <2 x i32>* %a
+  %ptrs = load <2 x i32*>, <2 x i32*>* %b
+  %mask = icmp eq <2 x i32> %cval, zeroinitializer
+  %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*> %ptrs, i32 8, <2 x i1> %mask, <2 x i32> undef)
+  store <2 x i32> %vals, <2 x i32>* %a
+  ret void
+}
+
+define void @masked_gather_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4i32:
+; CHECK: ldr q[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl4
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, z[[CMP]].s, #0
+; CHECK-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: uzp1 z[[UZP:[0-9]+]].s, [[RES]].s, [[RES]].s
+; CHECK-NEXT: str q[[UZP]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x i32>, <4 x i32>* %a
+  %ptrs = load <4 x i32*>, <4 x i32*>* %b
+  %mask = icmp eq <4 x i32> %cval, zeroinitializer
+  %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 8, <4 x i1> %mask, <4 x i32> undef)
+  store <4 x i32> %vals, <4 x i32>* %a
+  ret void
+}
+
+define void @masked_gather_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8i32:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
+; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
+; VBITS_GE_512-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_512-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x i32>, <8 x i32>* %a
+  %ptrs = load <8 x i32*>, <8 x i32*>* %b
+  %mask = icmp eq <8 x i32> %cval, zeroinitializer
+  %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptrs, i32 8, <8 x i1> %mask, <8 x i32> undef)
+  store <8 x i32> %vals, <8 x i32>* %a
+  ret void
+}
+
+define void @masked_gather_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16i32:
+; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
+; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
+; VBITS_GE_1024-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_1024-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x i32>, <16 x i32>* %a
+  %ptrs = load <16 x i32*>, <16 x i32*>* %b
+  %mask = icmp eq <16 x i32> %cval, zeroinitializer
+  %vals = call <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %ptrs, i32 8, <16 x i1> %mask, <16 x i32> undef)
+  store <16 x i32> %vals, <16 x i32>* %a
+  ret void
+}
+
+define void @masked_gather_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32i32:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
+; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x i32>, <32 x i32>* %a
+  %ptrs = load <32 x i32*>, <32 x i32*>* %b
+  %mask = icmp eq <32 x i32> %cval, zeroinitializer
+  %vals = call <32 x i32> @llvm.masked.gather.v32i32(<32 x i32*> %ptrs, i32 8, <32 x i1> %mask, <32 x i32> undef)
+  store <32 x i32> %vals, <32 x i32>* %a
+  ret void
+}
+
+;
+; LD1D
+;
+
+; Scalarize 1 x i64 gathers
+define void @masked_gather_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v1i64:
+; CHECK-NOT: ptrue
+  %cval = load <1 x i64>, <1 x i64>* %a
+  %ptrs = load <1 x i64*>, <1 x i64*>* %b
+  %mask = icmp eq <1 x i64> %cval, zeroinitializer
+  %vals = call <1 x i64> @llvm.masked.gather.v1i64(<1 x i64*> %ptrs, i32 8, <1 x i1> %mask, <1 x i64> undef)
+  store <1 x i64> %vals, <1 x i64>* %a
+  ret void
+}
+
+define void @masked_gather_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2i64:
+; CHECK: ldr q[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
+; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
+; CHECK-NEXT: ld1d { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: str q[[RES]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <2 x i64>, <2 x i64>* %a
+  %ptrs = load <2 x i64*>, <2 x i64*>* %b
+  %mask = icmp eq <2 x i64> %cval, zeroinitializer
+  %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %ptrs, i32 8, <2 x i1> %mask, <2 x i64> undef)
+  store <2 x i64> %vals, <2 x i64>* %a
+  ret void
+}
+
+define void @masked_gather_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4i64:
+; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
+; CHECK-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x i64>, <4 x i64>* %a
+  %ptrs = load <4 x i64*>, <4 x i64*>* %b
+  %mask = icmp eq <4 x i64> %cval, zeroinitializer
+  %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %ptrs, i32 8, <4 x i1> %mask, <4 x i64> undef)
+  store <4 x i64> %vals, <4 x i64>* %a
+  ret void
+}
+
+define void @masked_gather_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8i64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
+; VBITS_GE_512-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x i64>, <8 x i64>* %a
+  %ptrs = load <8 x i64*>, <8 x i64*>* %b
+  %mask = icmp eq <8 x i64> %cval, zeroinitializer
+  %vals = call <8 x i64> @llvm.masked.gather.v8i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %mask, <8 x i64> undef)
+  store <8 x i64> %vals, <8 x i64>* %a
+  ret void
+}
+
+define void @masked_gather_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16i64:
+; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
+; VBITS_GE_1024-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x i64>, <16 x i64>* %a
+  %ptrs = load <16 x i64*>, <16 x i64*>* %b
+  %mask = icmp eq <16 x i64> %cval, zeroinitializer
+  %vals = call <16 x i64> @llvm.masked.gather.v16i64(<16 x i64*> %ptrs, i32 8, <16 x i1> %mask, <16 x i64> undef)
+  store <16 x i64> %vals, <16 x i64>* %a
+  ret void
+}
+
+define void @masked_gather_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32i64:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
+; VBITS_GE_2048-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x i64>, <32 x i64>* %a
+  %ptrs = load <32 x i64*>, <32 x i64*>* %b
+  %mask = icmp eq <32 x i64> %cval, zeroinitializer
+  %vals = call <32 x i64> @llvm.masked.gather.v32i64(<32 x i64*> %ptrs, i32 8, <32 x i1> %mask, <32 x i64> undef)
+  store <32 x i64> %vals, <32 x i64>* %a
+  ret void
+}
+
+;
+; LD1H (float)
+;
+
+define void @masked_gather_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2f16:
+; CHECK: ldr s[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: movi d[[ZERO:[0-9]+]], #0000000000000000
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].h, vl4
+; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0.0
+; CHECK-NEXT: umov w8, v[[CMP]].h[0]
+; CHECK-NEXT: umov w9, v[[CMP]].h[1]
+; CHECK-NEXT: fmov s[[CMP]], w8
+; CHECK-NEXT: mov v[[CMP]].s[1], w9
+; CHECK-NEXT: shl v[[CMP]].2s, v[[CMP]].2s, #16
+; CHECK-NEXT: sshr v[[CMP]].2s, v[[CMP]].2s, #16
+; CHECK-NEXT: fmov w9, s[[CMP]]
+; CHECK-NEXT: mov w8, v[[CMP]].s[1]
+; CHECK-NEXT: mov v[[NCMP:[0-9]+]].h[0], w9
+; CHECK-NEXT: mov v[[NCMP]].h[1], w8
+; CHECK-NEXT: shl v[[SHL:[0-9]+]].4h, v[[NCMP]].4h, #15
+; CHECK-NEXT: sshr v[[SHL]].4h, v[[SHL]].4h, #15
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG0]]/z, z[[SHL]].h, #0
+; CHECK-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; CHECK-NEXT: str s[[UZP2]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <2 x half>, <2 x half>* %a
+  %ptrs = load <2 x half*>, <2 x half*>* %b
+  %mask = fcmp oeq <2 x half> %cval, zeroinitializer
+  %vals = call <2 x half> @llvm.masked.gather.v2f16(<2 x half*> %ptrs, i32 8, <2 x i1> %mask, <2 x half> undef)
+  store <2 x half> %vals, <2 x half>* %a
+  ret void
+}
+
+define void @masked_gather_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4f16:
+; CHECK: ldr d[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4
+; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0
+; CHECK-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; CHECK-NEXT: str d[[UZP2]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x half>, <4 x half>* %a
+  %ptrs = load <4 x half*>, <4 x half*>* %b
+  %mask = fcmp oeq <4 x half> %cval, zeroinitializer
+  %vals = call <4 x half> @llvm.masked.gather.v4f16(<4 x half*> %ptrs, i32 8, <4 x i1> %mask, <4 x half> undef)
+  store <4 x half> %vals, <4 x half>* %a
+  ret void
+}
+
+define void @masked_gather_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8f16:
+; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].h, vl8
+; VBITS_GE_512-NEXT: fcmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
+; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0
+; VBITS_GE_512-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_512-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_512-NEXT: str q[[UZP2]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x half>, <8 x half>* %a
+  %ptrs = load <8 x half*>, <8 x half*>* %b
+  %mask = fcmp oeq <8 x half> %cval, zeroinitializer
+  %vals = call <8 x half> @llvm.masked.gather.v8f16(<8 x half*> %ptrs, i32 8, <8 x i1> %mask, <8 x half> undef)
+  store <8 x half> %vals, <8 x half>* %a
+  ret void
+}
+
+define void @masked_gather_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16f16:
+; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
+; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_1024-NEXT: mov [[ZERO:z[0-9]+]].h, #0
+; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]].h
+; VBITS_GE_1024-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_1024-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_1024-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x half>, <16 x half>* %a
+  %ptrs = load <16 x half*>, <16 x half*>* %b
+  %mask = fcmp oeq <16 x half> %cval, zeroinitializer
+  %vals = call <16 x half> @llvm.masked.gather.v16f16(<16 x half*> %ptrs, i32 8, <16 x i1> %mask, <16 x half> undef)
+  store <16 x half> %vals, <16 x half>* %a
+  ret void
+}
+
+define void @masked_gather_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32f16:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]].h
+; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
+; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x half>, <32 x half>* %a
+  %ptrs = load <32 x half*>, <32 x half*>* %b
+  %mask = fcmp oeq <32 x half> %cval, zeroinitializer
+  %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x half*> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+  store <32 x half> %vals, <32 x half>* %a
+  ret void
+}
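+
+; FP gathers reuse the integer gather instructions; only the mask computation
+; differs from the integer tests, using an FP compare (fcmeq).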
+
+;
+; LD1W (float)
+;
+
+define void @masked_gather_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2f32:
+; CHECK: ldr d[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
+; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0
+; CHECK-NEXT: ld1w { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
+; CHECK-NEXT: str d[[XTN]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <2 x float>, <2 x float>* %a
+  %ptrs = load <2 x float*>, <2 x float*>* %b
+  %mask = fcmp oeq <2 x float> %cval, zeroinitializer
+  %vals = call <2 x float> @llvm.masked.gather.v2f32(<2 x float*> %ptrs, i32 8, <2 x i1> %mask, <2 x float> undef)
+  store <2 x float> %vals, <2 x float>* %a
+  ret void
+}
+
+define void @masked_gather_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4f32:
+; CHECK: ldr q[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl4
+; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, z[[CMP]].s, #0
+; CHECK-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: uzp1 z[[UZP:[0-9]+]].s, [[RES]].s, [[RES]].s
+; CHECK-NEXT: str q[[UZP]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x float>, <4 x float>* %a
+  %ptrs = load <4 x float*>, <4 x float*>* %b
+  %mask = fcmp oeq <4 x float> %cval, zeroinitializer
+  %vals = call <4 x float> @llvm.masked.gather.v4f32(<4 x float*> %ptrs, i32 8, <4 x i1> %mask, <4 x float> undef)
+  store <4 x float> %vals, <4 x float>* %a
+  ret void
+}
+
+define void @masked_gather_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8f32:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
+; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_512-NEXT: mov [[ZERO:z[0-9]+]].s, #0
+; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]].s
+; VBITS_GE_512-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_512-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x float>, <8 x float>* %a
+  %ptrs = load <8 x float*>, <8 x float*>* %b
+  %mask = fcmp oeq <8 x float> %cval, zeroinitializer
+  %vals = call <8 x float> @llvm.masked.gather.v8f32(<8 x float*> %ptrs, i32 8, <8 x i1> %mask, <8 x float> undef)
+  store <8 x float> %vals, <8 x float>* %a
+  ret void
+}
+
+define void @masked_gather_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16f32:
+; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
+; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_1024-NEXT: mov [[ZERO:z[0-9]+]].s, #0
+; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]].s
+; VBITS_GE_1024-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_1024-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x float>, <16 x float>* %a
+  %ptrs = load <16 x float*>, <16 x float*>* %b
+  %mask = fcmp oeq <16 x float> %cval, zeroinitializer
+  %vals = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %ptrs, i32 8, <16 x i1> %mask, <16 x float> undef)
+  store <16 x float> %vals, <16 x float>* %a
+  ret void
+}
+
+define void @masked_gather_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32f32:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]].s
+; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x float>, <32 x float>* %a
+  %ptrs = load <32 x float*>, <32 x float*>* %b
+  %mask = fcmp oeq <32 x float> %cval, zeroinitializer
+  %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+  store <32 x float> %vals, <32 x float>* %a
+  ret void
+}
+
+;
+; LD1D (float)
+;
+
+; Scalarize 1 x double gathers
+define void @masked_gather_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v1f64:
+; CHECK-NOT: ptrue
+  %cval = load <1 x double>, <1 x double>* %a
+  %ptrs = load <1 x double*>, <1 x double*>* %b
+  %mask = fcmp oeq <1 x double> %cval, zeroinitializer
+  %vals = call <1 x double> @llvm.masked.gather.v1f64(<1 x double*> %ptrs, i32 8, <1 x i1> %mask, <1 x double> undef)
+  store <1 x double> %vals, <1 x double>* %a
+  ret void
+}
+
+define void @masked_gather_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v2f64:
+; CHECK: ldr q[[VALS:[0-9]+]], [x0]
+; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
+; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
+; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
+; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
+; CHECK-NEXT: ld1d { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
+; CHECK-NEXT: str q[[RES]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <2 x double>, <2 x double>* %a
+  %ptrs = load <2 x double*>, <2 x double*>* %b
+  %mask = fcmp oeq <2 x double> %cval, zeroinitializer
+  %vals = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32 8, <2 x i1> %mask, <2 x double> undef)
+  store <2 x double> %vals, <2 x double>* %a
+  ret void
+}
+
+define void @masked_gather_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v4f64:
+; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; CHECK-NEXT: mov [[ZERO:z[0-9]+]].d, #0
+; CHECK-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]].d
+; CHECK-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; CHECK-NEXT: ret
+  %cval = load <4 x double>, <4 x double>* %a
+  %ptrs = load <4 x double*>, <4 x double*>* %b
+  %mask = fcmp oeq <4 x double> %cval, zeroinitializer
+  %vals = call <4 x double> @llvm.masked.gather.v4f64(<4 x double*> %ptrs, i32 8, <4 x i1> %mask, <4 x double> undef)
+  store <4 x double> %vals, <4 x double>* %a
+  ret void
+}
+
+define void @masked_gather_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v8f64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_512-NEXT: mov [[ZERO:z[0-9]+]].d, #0
+; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]].d
+; VBITS_GE_512-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; VBITS_GE_512-NEXT: ret
+  %cval = load <8 x double>, <8 x double>* %a
+  %ptrs = load <8 x double*>, <8 x double*>* %b
+  %mask = fcmp oeq <8 x double> %cval, zeroinitializer
+  %vals = call <8 x double> @llvm.masked.gather.v8f64(<8 x double*> %ptrs, i32 8, <8 x i1> %mask, <8 x double> undef)
+  store <8 x double> %vals, <8 x double>* %a
+  ret void
+}
+
+define void @masked_gather_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v16f64:
+; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_1024-NEXT: mov [[ZERO:z[0-9]+]].d, #0
+; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]].d
+; VBITS_GE_1024-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %cval = load <16 x double>, <16 x double>* %a
+  %ptrs = load <16 x double*>, <16 x double*>* %b
+  %mask = fcmp oeq <16 x double> %cval, zeroinitializer
+  %vals = call <16 x double> @llvm.masked.gather.v16f64(<16 x double*> %ptrs, i32 8, <16 x i1> %mask, <16 x double> undef)
+  store <16 x double> %vals, <16 x double>* %a
+  ret void
+}
+
+define void @masked_gather_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 {
+; CHECK-LABEL: masked_gather_v32f64:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].d, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]].d
+; VBITS_GE_2048-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
+; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cval = load <32 x double>, <32 x double>* %a
+  %ptrs = load <32 x double*>, <32 x double*>* %b
+  %mask = fcmp oeq <32 x double> %cval, zeroinitializer
+  %vals = call <32 x double> @llvm.masked.gather.v32f64(<32 x double*> %ptrs, i32 8, <32 x i1> %mask, <32 x double> undef)
+  store <32 x double> %vals, <32 x double>* %a
+  ret void
+}
+
+; The tests above cover the supported data types; the tests below check that
+; the gather addressing modes still function.
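+; They cover 32-bit sign- and zero-extended offsets (scaled and unscaled),
+; 64-bit scaled and unscaled offsets from a scalar base, and a vector of
+; bases combined with a register or immediate offset.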
+
+define void @masked_gather_32b_scaled_sext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
+; CHECK-LABEL: masked_gather_32b_scaled_sext:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]]
+; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].s }, [[MASK]]/z, [x2, [[PTRS]].s, sxtw #1]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].h, [[RES]].h, [[RES]].h
+; VBITS_GE_2048-NEXT: st1h { [[UZP]].h }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cvals = load <32 x half>, <32 x half>* %a
+  %idxs = load <32 x i32>, <32 x i32>* %b
+  %ext = sext <32 x i32> %idxs to <32 x i64>
+  %ptrs = getelementptr half, half* %base, <32 x i64> %ext
+  %mask = fcmp oeq <32 x half> %cvals, zeroinitializer
+  %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x half*> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+  store <32 x half> %vals, <32 x half>* %a
+  ret void
+}
+
+define void @masked_gather_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
+; CHECK-LABEL: masked_gather_32b_scaled_zext:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]]
+; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].s }, [[MASK]]/z, [x2, [[PTRS]].s, uxtw #1]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].h, [[RES]].h, [[RES]].h
+; VBITS_GE_2048-NEXT: st1h { [[UZP]].h }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cvals = load <32 x half>, <32 x half>* %a
+  %idxs = load <32 x i32>, <32 x i32>* %b
+  %ext = zext <32 x i32> %idxs to <32 x i64>
+  %ptrs = getelementptr half, half* %base, <32 x i64> %ext
+  %mask = fcmp oeq <32 x half> %cvals, zeroinitializer
+  %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x half*> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+  store <32 x half> %vals, <32 x half>* %a
+  ret void
+}
+
+define void @masked_gather_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
+; CHECK-LABEL: masked_gather_32b_unscaled_sext:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]]
+; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].s }, [[MASK]]/z, [x2, [[PTRS]].s, sxtw]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].h, [[RES]].h, [[RES]].h
+; VBITS_GE_2048-NEXT: st1h { [[UZP]].h }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cvals = load <32 x half>, <32 x half>* %a
+  %idxs = load <32 x i32>, <32 x i32>* %b
+  %ext = sext <32 x i32> %idxs to <32 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %ext
+  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x half*>
+  %mask = fcmp oeq <32 x half> %cvals, zeroinitializer
+  %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x half*> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+  store <32 x half> %vals, <32 x half>* %a
+  ret void
+}
+
+define void @masked_gather_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
+; CHECK-LABEL: masked_gather_32b_unscaled_zext:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]]
+; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].s }, [[MASK]]/z, [x2, [[PTRS]].s, uxtw]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].h, [[RES]].h, [[RES]].h
+; VBITS_GE_2048-NEXT: st1h { [[UZP]].h }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cvals = load <32 x half>, <32 x half>* %a
+  %idxs = load <32 x i32>, <32 x i32>* %b
+  %ext = zext <32 x i32> %idxs to <32 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %ext
+  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x half*>
+  %mask = fcmp oeq <32 x half> %cvals, zeroinitializer
+  %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x half*> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+  store <32 x half> %vals, <32 x half>* %a
+  ret void
+}
+
+define void @masked_gather_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float* %base) #0 {
+; CHECK-LABEL: masked_gather_64b_scaled:
+; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
+; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
+; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0
+; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]]
+; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d, lsl #2]
+; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
+; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %cvals = load <32 x float>, <32 x float>* %a
+  %idxs = load <32 x i64>, <32 x i64>* %b
+  %ptrs = getelementptr float, float* %base, <32 x i64> %idxs
+  %mask = fcmp oeq <32 x float> %cvals, zeroinitializer
+  %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+  store <32 x float> %vals, <32 x float>* %a
+  ret void
+}
[[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d] +; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s +; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0] +; VBITS_GE_2048-NEXT: ret + %cvals = load <32 x float>, <32 x float>* %a + %idxs = load <32 x i64>, <32 x i64>* %b + %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %idxs + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*> + %mask = fcmp oeq <32 x float> %cvals, zeroinitializer + %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef) + store <32 x float> %vals, <32 x float>* %a + ret void +} + +; FIXME: This case does not yet codegen well due to deficiencies in opcode selection +define void @masked_gather_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %off) #0 { +; CHECK-LABEL: masked_gather_vec_plus_reg: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, x2 +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS_ADD]].d] +; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s +; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0] +; VBITS_GE_2048-NEXT: ret + %cvals = load <32 x float>, <32 x float>* %a + %bases = load <32 x i8*>, <32 x i8*>* %b + %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 %off + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*> + %mask = fcmp oeq <32 x float> %cvals, zeroinitializer + %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef) + store <32 x float> %vals, <32 x float>* %a + ret void +} + +; FIXME: This case does not yet codegen well due to deficiencies in opcode selection +define void @masked_gather_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_vec_plus_imm: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, #4 +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS_ADD]].d] +; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s +; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0] +; VBITS_GE_2048-NEXT: ret + %cvals = load <32 x float>, <32 x float>* %a + %bases = load <32 x i8*>, <32 x i8*>* %b + %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 4 + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*> + %mask = fcmp oeq <32 x float> 
%cvals, zeroinitializer + %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef) + store <32 x float> %vals, <32 x float>* %a + ret void +} + +define void @masked_gather_passthru(<32 x float>* %a, <32 x float*>* %b, <32 x float>* %c) #0 { +; CHECK-LABEL: masked_gather_passthru: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: ld1w { [[PT:z[0-9]+]].s }, [[PG0]]/z, [x2] +; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s +; VBITS_GE_2048-NEXT: sel [[SEL:z[0-9]+]].s, [[PG1]], [[UZP]].s, [[PT]].s +; VBITS_GE_2048-NEXT: st1w { [[SEL]].s }, [[PG0]], [x0] +; VBITS_GE_2048-NEXT: ret + %cvals = load <32 x float>, <32 x float>* %a + %ptrs = load <32 x float*>, <32 x float*>* %b + %passthru = load <32 x float>, <32 x float>* %c + %mask = fcmp oeq <32 x float> %cvals, zeroinitializer + %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> %passthru) + store <32 x float> %vals, <32 x float>* %a + ret void +} + +define void @masked_gather_passthru_0(<32 x float>* %a, <32 x float*>* %b) #0 { +; CHECK-LABEL: masked_gather_passthru_0: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s +; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0] +; VBITS_GE_2048-NEXT: ret + %cvals = load <32 x float>, <32 x float>* %a + %ptrs = load <32 x float*>, <32 x float*>* %b + %mask = fcmp oeq <32 x float> %cvals, zeroinitializer + %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x float*> %ptrs, i32 8, <32 x i1> %mask, <32 x float> zeroinitializer) + store <32 x float> %vals, <32 x float>* %a + ret void +} + +declare <2 x i8> @llvm.masked.gather.v2i8(<2 x i8*>, i32, <2 x i1>, <2 x i8>) +declare <4 x i8> @llvm.masked.gather.v4i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>) +declare <8 x i8> @llvm.masked.gather.v8i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>) +declare <16 x i8> @llvm.masked.gather.v16i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>) +declare <32 x i8> @llvm.masked.gather.v32i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>) + +declare <2 x i16> @llvm.masked.gather.v2i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>) +declare <4 x i16> @llvm.masked.gather.v4i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>) +declare <8 x i16> @llvm.masked.gather.v8i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>) +declare <16 x i16> @llvm.masked.gather.v16i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>) +declare <32 x i16> @llvm.masked.gather.v32i16(<32 x i16*>, i32, <32 x i1>, <32 x i16>) + +declare <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>) +declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>) +declare <8 x i32> 
@llvm.masked.gather.v8i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>) +declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>) +declare <32 x i32> @llvm.masked.gather.v32i32(<32 x i32*>, i32, <32 x i1>, <32 x i32>) + +declare <1 x i64> @llvm.masked.gather.v1i64(<1 x i64*>, i32, <1 x i1>, <1 x i64>) +declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*>, i32, <2 x i1>, <2 x i64>) +declare <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*>, i32, <4 x i1>, <4 x i64>) +declare <8 x i64> @llvm.masked.gather.v8i64(<8 x i64*>, i32, <8 x i1>, <8 x i64>) +declare <16 x i64> @llvm.masked.gather.v16i64(<16 x i64*>, i32, <16 x i1>, <16 x i64>) +declare <32 x i64> @llvm.masked.gather.v32i64(<32 x i64*>, i32, <32 x i1>, <32 x i64>) + +declare <2 x half> @llvm.masked.gather.v2f16(<2 x half*>, i32, <2 x i1>, <2 x half>) +declare <4 x half> @llvm.masked.gather.v4f16(<4 x half*>, i32, <4 x i1>, <4 x half>) +declare <8 x half> @llvm.masked.gather.v8f16(<8 x half*>, i32, <8 x i1>, <8 x half>) +declare <16 x half> @llvm.masked.gather.v16f16(<16 x half*>, i32, <16 x i1>, <16 x half>) +declare <32 x half> @llvm.masked.gather.v32f16(<32 x half*>, i32, <32 x i1>, <32 x half>) + +declare <2 x float> @llvm.masked.gather.v2f32(<2 x float*>, i32, <2 x i1>, <2 x float>) +declare <4 x float> @llvm.masked.gather.v4f32(<4 x float*>, i32, <4 x i1>, <4 x float>) +declare <8 x float> @llvm.masked.gather.v8f32(<8 x float*>, i32, <8 x i1>, <8 x float>) +declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>) +declare <32 x float> @llvm.masked.gather.v32f32(<32 x float*>, i32, <32 x i1>, <32 x float>) + +declare <1 x double> @llvm.masked.gather.v1f64(<1 x double*>, i32, <1 x i1>, <1 x double>) +declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*>, i32, <2 x i1>, <2 x double>) +declare <4 x double> @llvm.masked.gather.v4f64(<4 x double*>, i32, <4 x i1>, <4 x double>) +declare <8 x double> @llvm.masked.gather.v8f64(<8 x double*>, i32, <8 x i1>, <8 x double>) +declare <16 x double> @llvm.masked.gather.v16f64(<16 x double*>, i32, <16 x i1>, <16 x double>) +declare <32 x double> @llvm.masked.gather.v32f64(<32 x double*>, i32, <32 x i1>, <32 x double>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll @@ -90,8 +90,6 @@ ; CHECK-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; CHECK-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; CHECK-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; CHECK-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; CHECK-NEXT: cmpne [[PG1]].s, [[PG0]]/z, [[Z0]].s, #0 ; CHECK-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x0] ; CHECK-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8] ; CHECK-NEXT: ret @@ -108,8 +106,6 @@ ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].s, [[PG0]]/z, [[Z0]].s, #0 ; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8] ; VBITS_GE_512-NEXT: ret @@ -126,8 +122,6 @@ ; VBITS_GE_1024-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; VBITS_GE_1024-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; 
VBITS_GE_1024-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_1024-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; VBITS_GE_1024-NEXT: cmpne [[PG1]].s, [[PG0]]/z, [[Z0]].s, #0 ; VBITS_GE_1024-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_1024-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8] ; VBITS_GE_1024-NEXT: ret @@ -144,8 +138,6 @@ ; VBITS_GE_2048-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; VBITS_GE_2048-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; VBITS_GE_2048-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_2048-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; VBITS_GE_2048-NEXT: cmpne [[PG1]].s, [[PG0]]/z, [[Z0]].s, #0 ; VBITS_GE_2048-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_2048-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8] ; VBITS_GE_2048-NEXT: ret @@ -163,8 +155,6 @@ ; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b -; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].b, [[PG0]]/z, [[Z0]].b, #0 ; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: st1b { [[Z0]].b }, [[PG0]], [x8] ; VBITS_GE_512-NEXT: ret @@ -181,8 +171,6 @@ ; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h -; VBITS_GE_512-NEXT: mov [[Z0]].h, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].h, [[PG0]]/z, [[Z0]].h, #0 ; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG0]], [x8] ; VBITS_GE_512: ret @@ -199,8 +187,6 @@ ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].s, [[PG0]]/z, [[Z0]].s, #0 ; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8] ; VBITS_GE_512-NEXT: ret @@ -217,8 +203,6 @@ ; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d -; VBITS_GE_512-NEXT: mov [[Z0]].d, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].d, [[PG0]]/z, [[Z0]].d, #0 ; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8] ; VBITS_GE_512-NEXT: ret @@ -235,8 +219,6 @@ ; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d -; VBITS_GE_512-NEXT: mov [[Z0]].d, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].d, [[PG0]]/z, [[Z0]].d, #0 ; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: sel [[Z2:z[0-9]+]].d, [[PG1]], [[Z0]].d, [[Z1]].d ; VBITS_GE_512-NEXT: st1d { [[Z2]].d }, [[PG0]], [x8] @@ -254,8 +236,6 @@ ; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1] ; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d -; VBITS_GE_512-NEXT: mov [[Z0]].d, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG1]].d, [[PG0]]/z, [[Z0]].d, #0 ; 
VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: sel [[Z2:z[0-9]+]].d, [[PG1]], [[Z0]].d, [[Z1]].d ; VBITS_GE_512-NEXT: st1d { [[Z2]].d }, [[PG0]], [x8] @@ -273,12 +253,10 @@ ; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b -; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0 -; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG2]]/z, [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b -; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8] +; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG1]], [x8] ; VBITS_GE_512-NEXT: ret %a = load <32 x i8>, <32 x i8>* %ap %b = load <32 x i8>, <32 x i8>* %bp @@ -337,12 +315,10 @@ ; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h -; VBITS_GE_512-NEXT: mov [[Z0]].h, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, #0 -; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG2]]/z, [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h -; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8] +; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG1]], [x8] ; VBITS_GE_512-NEXT: ret %a = load <16 x i16>, <16 x i16>* %ap %b = load <16 x i16>, <16 x i16>* %bp @@ -379,12 +355,10 @@ ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0 -; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG2]]/z, [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s -; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8] +; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG1]], [x8] ; VBITS_GE_512-NEXT: ret %a = load <8 x i32>, <8 x i32>* %ap %b = load <8 x i32>, <8 x i32>* %bp @@ -400,12 +374,10 @@ ; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b -; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0 -; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG2]]/z, [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32 ; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b -; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8] +; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG1]], [x8] ; VBITS_GE_512-NEXT: ret %a = load <32 x i8>, <32 x i8>* %ap %b = load <32 x i8>, <32 x i8>* %bp @@ -464,12 +436,10 @@ ; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h -; VBITS_GE_512-NEXT: mov [[Z0]].h, 
[[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, #0 -; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG2]]/z, [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16 ; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h -; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8] +; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG1]], [x8] ; VBITS_GE_512-NEXT: ret %a = load <16 x i16>, <16 x i16>* %ap %b = load <16 x i16>, <16 x i16>* %bp @@ -506,12 +476,10 @@ ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1] ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0 -; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG2]]/z, [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8 ; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s -; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8] +; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG1]], [x8] ; VBITS_GE_512-NEXT: ret %a = load <8 x i32>, <8 x i32>* %ap %b = load <8 x i32>, <8 x i32>* %bp diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll @@ -0,0 +1,924 @@ +; RUN: llc -aarch64-sve-vector-bits-min=128 -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE +; RUN: llc -aarch64-sve-vector-bits-min=256 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK +; RUN: llc -aarch64-sve-vector-bits-min=384 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK +; RUN: llc -aarch64-sve-vector-bits-min=512 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=640 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=768 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=896 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512 +; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512 + +target triple = 
"aarch64-unknown-linux-gnu" + +; Don't use SVE when its registers are no bigger than NEON. +; NO_SVE-NOT: ptrue + +; +; ST1B +; + +define void @masked_scatter_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i8: +; CHECK: ldrb [[VALS_LO:w[0-9]+]], [x0] +; CHECK-NEXT: ldrb [[VALS_HI:w[0-9]+]], [x0, #1] +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2 +; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]] +; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]] +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0 +; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[VALS]].2s, #0 +; CHECK-NEXT: st1b { z[[SHL]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x i8>, <2 x i8>* %a + %ptrs = load <2 x i8*>, <2 x i8*>* %b + %mask = icmp eq <2 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i8(<2 x i8> %vals, <2 x i8*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i8: +; CHECK: ldrb [[VALS_LO1:w[0-9]+]], [x0] +; CHECK-NEXT: ldrb [[VALS_LO2:w[0-9]+]], [x0, #1] +; CHECK-NEXT: ldrb [[VALS_HI1:w[0-9]+]], [x0, #2] +; CHECK-NEXT: ldrb [[VALS_HI2:w[0-9]+]], [x0, #3] +; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO1]] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4 +; CHECK-NEXT: mov v[[VALS]].h[1], [[VALS_LO2]] +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: mov v[[VALS]].h[2], [[VALS_HI1]] +; CHECK-NEXT: mov v[[VALS]].h[3], [[VALS_HI2]] +; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4 +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0 +; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[VALS]].h +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0 +; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; CHECK-NEXT: st1b { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x i8>, <4 x i8>* %a + %ptrs = load <4 x i8*>, <4 x i8*>* %b + %mask = icmp eq <4 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i8(<4 x i8> %vals, <4 x i8*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i8: +; VBITS_GE_512: ldr d[[VALS:[0-9]+]], [x0] +; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].b, vl8 +; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8b, v[[VALS]].8b, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[VALS]].b +; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h +; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].b, [[PG1]]/z, z[[CMP]].b, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s +; VBITS_GE_512-NEXT: st1b { [[UPK3]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_BE_512-NEXT: ret + %vals = load <8 x i8>, <8 x i8>* %a + %ptrs = load <8 x i8*>, <8 x i8*>* %b + %mask = icmp eq <8 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i8(<8 x i8> %vals, <8 x i8*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i8: +; VBITS_GE_1024: ldr q[[VALS:[0-9]+]], [x0] +; VBITS_GE_1024-NEXT: ptrue [[PG0:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].b, vl16 +; VBITS_GE_1024-NEXT: cmeq 
+define void @masked_scatter_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i8: +; VBITS_GE_1024: ldr q[[VALS:[0-9]+]], [x0] +; VBITS_GE_1024-NEXT: ptrue [[PG0:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].b, vl16 +; VBITS_GE_1024-NEXT: cmeq v[[CMP:[0-9]+]].16b, v[[VALS]].16b, #0 +; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[VALS]].b +; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h +; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].b, [[PG1]]/z, z[[CMP]].b, #0 +; VBITS_GE_1024-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s +; VBITS_GE_1024-NEXT: st1b { [[UPK3]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x i8>, <16 x i8>* %a + %ptrs = load <16 x i8*>, <16 x i8*>* %b + %mask = icmp eq <16 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v16i8(<16 x i8> %vals, <16 x i8*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32i8: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].b, vl32 +; VBITS_GE_2048-NEXT: ld1b { [[VALS:z[0-9]+]].b }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].b, [[PG0]]/z, [[VALS]].b, #0 +; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].h, [[VALS]].b +; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h +; VBITS_GE_2048-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s +; VBITS_GE_2048-NEXT: st1b { [[UPK3]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x i8>, <32 x i8>* %a + %ptrs = load <32 x i8*>, <32 x i8*>* %b + %mask = icmp eq <32 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v32i8(<32 x i8> %vals, <32 x i8*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; +; ST1H +; + +define void @masked_scatter_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i16: +; CHECK: ldrh [[VALS_LO:w[0-9]+]], [x0] +; CHECK-NEXT: ldrh [[VALS_HI:w[0-9]+]], [x0, #2] +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2 +; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]] +; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]] +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0 +; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[VALS]].2s, #0 +; CHECK-NEXT: st1h { z[[SHL]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x i16>, <2 x i16>* %a + %ptrs = load <2 x i16*>, <2 x i16*>* %b + %mask = icmp eq <2 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i16(<2 x i16> %vals, <2 x i16*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i16: +; CHECK: ldr d[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4 +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0 +; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[VALS]].h +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0 +; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; CHECK-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x i16>, <4 x i16>* %a + %ptrs = load <4 x i16*>, <4 x i16*>* %b + %mask = icmp eq <4 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i16(<4 x i16> %vals, <4 x i16*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i16: +; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].h, vl8 +; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[VALS]].h +; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; VBITS_GE_512-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_512-NEXT: ret + %vals = load <8 x i16>, <8 x i16>* %a + %ptrs = load <8 x i16*>, <8 x i16*>* %b + %mask = icmp eq <8 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i16(<8 x i16> %vals, <8 x i16*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i16: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16 +; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0 +; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; VBITS_GE_1024-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x i16>, <16 x i16>* %a + %ptrs = load <16 x i16*>, <16 x i16*>* %b + %mask = icmp eq <16 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v16i16(<16 x i16> %vals, <16 x i16*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32i16: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0 +; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; VBITS_GE_2048-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x i16>, <32 x i16>* %a + %ptrs = load <32 x i16*>, <32 x i16*>* %b + %mask = icmp eq <32 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v32i16(<32 x i16> %vals, <32 x i16*> %ptrs, i32 8, <32 x i1> %mask) + ret void +}
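+; Note: a scatter addresses memory through a .d vector of pointers, so data narrower than 64 bits is widened one step at a time to line one data lane up with each 64-bit pointer lane. A sketch of the i8 case (illustrative only), assuming the values start in z0 and the pointers in z2: +;   uunpklo z1.h, z0.b   ; b -> h +;   uunpklo z1.s, z1.h   ; h -> s +;   uunpklo z1.d, z1.s   ; s -> d +;   st1b { z1.d }, p0, [z2.d]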
+ +; +; ST1W +; + +define void @masked_scatter_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i32: +; CHECK: ldr d[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2 +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0 +; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[VALS]].2s, #0 +; CHECK-NEXT: st1w { z[[SHL]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x i32>, <2 x i32>* %a + %ptrs = load <2 x i32*>, <2 x i32*>* %b + %mask = icmp eq <2 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i32(<2 x i32> %vals, <2 x i32*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i32: +; CHECK: ldr q[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl4 +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, z[[CMP]].s, #0 +; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[VALS]].s +; CHECK-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x i32>, <4 x i32>* %a + %ptrs = load <4 x i32*>, <4 x i32*>* %b + %mask = icmp eq <4 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i32(<4 x i32> %vals, <4 x i32*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8 +; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_512-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_512-NEXT: ret + %vals = load <8 x i32>, <8 x i32>* %a + %ptrs = load <8 x i32*>, <8 x i32*>* %b + %mask = icmp eq <8 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i32(<8 x i32> %vals, <8 x i32*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i32: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16 +; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0 +; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_1024-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x i32>, <16 x i32>* %a + %ptrs = load <16 x i32*>, <16 x i32*>* %b + %mask = icmp eq <16 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v16i32(<16 x i32> %vals, <16 x i32*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32i32: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0 +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_2048-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x i32>, <32 x i32>* %a + %ptrs = load <32 x i32*>, <32 x i32*>* %b + %mask = icmp eq <32 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v32i32(<32 x i32> %vals, <32 x i32*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; +; ST1D +; + +; Scalarize 1 x i64 scatters +define void @masked_scatter_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v1i64: +; CHECK-NOT: ptrue + %vals = load <1 x i64>, <1 x i64>* %a + %ptrs = load <1 x i64*>, <1 x i64*>* %b + %mask = icmp eq <1 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v1i64(<1 x i64> %vals, <1 x i64*> %ptrs, i32 8, <1 x i1> %mask) + ret void +}
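+; For reference, the scalarized form of the v1i64 case above is roughly equivalent to the IR below (a sketch, not part of this test): +;   %m = extractelement <1 x i1> %mask, i64 0 +;   br i1 %m, label %cond.store, label %else +; cond.store: +;   %p = extractelement <1 x i64*> %ptrs, i64 0 +;   %v = extractelement <1 x i64> %vals, i64 0 +;   store i64 %v, i64* %p +;   br label %else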
+ +define void @masked_scatter_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i64: +; CHECK: ldr q[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2 +; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0 +; CHECK-NEXT: st1d { z[[VALS]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x i64>, <2 x i64>* %a + %ptrs = load <2 x i64*>, <2 x i64*>* %b + %mask = icmp eq <2 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i64(<2 x i64> %vals, <2 x i64*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i64: +; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0 +; CHECK-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x i64>, <4 x i64>* %a + %ptrs = load <4 x i64*>, <4 x i64*>* %b + %mask = icmp eq <4 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i64(<4 x i64> %vals, <4 x i64*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0 +; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_512-NEXT: ret + %vals = load <8 x i64>, <8 x i64>* %a + %ptrs = load <8 x i64*>, <8 x i64*>* %b + %mask = icmp eq <8 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i64(<8 x i64> %vals, <8 x i64*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i64: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0 +; VBITS_GE_1024-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x i64>, <16 x i64>* %a + %ptrs = load <16 x i64*>, <16 x i64*>* %b + %mask = icmp eq <16 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v16i64(<16 x i64> %vals, <16 x i64*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32i64: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0 +; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x i64>, <32 x i64>* %a + %ptrs = load <32 x i64*>, <32 x i64*>* %b + %mask = icmp eq <32 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v32i64(<32 x i64> %vals, <32 x i64*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; +; ST1H (float) +; + +define void @masked_scatter_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
+; CHECK-LABEL: masked_scatter_v2f16: +; CHECK: ldr s[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: movi d2, #0000000000000000 +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].h, vl4 +; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0.0 +; CHECK-NEXT: umov w8, v[[CMP]].h[0] +; CHECK-NEXT: umov w9, v[[CMP]].h[1] +; CHECK-NEXT: fmov s[[CMP]], w8 +; CHECK-NEXT: mov v[[CMP]].s[1], w9 +; CHECK-NEXT: shl v[[CMP]].2s, v[[CMP]].2s, #16 +; CHECK-NEXT: sshr v[[CMP]].2s, v[[CMP]].2s, #16 +; CHECK-NEXT: fmov w9, s[[CMP]] +; CHECK-NEXT: mov w8, v[[CMP]].s[1] +; CHECK-NEXT: mov v[[NCMP:[0-9]+]].h[0], w9 +; CHECK-NEXT: mov v[[NCMP]].h[1], w8 +; CHECK-NEXT: shl v[[NCMP]].4h, v[[NCMP]].4h, #15 +; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[VALS]].h +; CHECK-NEXT: sshr v[[NCMP]].4h, v[[NCMP]].4h, #15 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG0]]/z, z[[NCMP]].h, #0 +; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; CHECK-NEXT: st1h { [[UPK2]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x half>, <2 x half>* %a + %ptrs = load <2 x half*>, <2 x half*>* %b + %mask = fcmp oeq <2 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v2f16(<2 x half> %vals, <2 x half*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4f16: +; CHECK: ldr d[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4 +; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0 +; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[VALS]].h +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0 +; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; CHECK-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x half>, <4 x half>* %a + %ptrs = load <4 x half*>, <4 x half*>* %b + %mask = fcmp oeq <4 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v4f16(<4 x half> %vals, <4 x half*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8f16: +; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0] +; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].h, vl8 +; VBITS_GE_512-NEXT: fcmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[VALS]].h +; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].h, [[PG1]]/z, z[[CMP]].h, #0 +; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; VBITS_GE_512-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_512-NEXT: ret + %vals = load <8 x half>, <8 x half>* %a + %ptrs = load <8 x half*>, <8 x half*>* %b + %mask = fcmp oeq <8 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v8f16(<8 x half> %vals, <8 x half*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16f16: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16 +; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_1024-NEXT: mov [[ZERO:z[0-9]+]].h, #0 +; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]]
+; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; VBITS_GE_1024-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x half>, <16 x half>* %a + %ptrs = load <16 x half*>, <16 x half*>* %b + %mask = fcmp oeq <16 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v16f16(<16 x half> %vals, <16 x half*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32f16: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s +; VBITS_GE_2048-NEXT: st1h { [[UPK2]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x half>, <32 x half>* %a + %ptrs = load <32 x half*>, <32 x half*>* %b + %mask = fcmp oeq <32 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; +; ST1W (float) +; + +define void @masked_scatter_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2f32: +; CHECK: ldr d[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2 +; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG0]]/z, z[[CMP]].s, #0 +; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[VALS]].2s, #0 +; CHECK-NEXT: st1w { z[[SHL]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x float>, <2 x float>* %a + %ptrs = load <2 x float*>, <2 x float*>* %b + %mask = fcmp oeq <2 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v2f32(<2 x float> %vals, <2 x float*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4f32: +; CHECK: ldr q[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl4 +; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, z[[CMP]].s, #0 +; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[VALS]].s +; CHECK-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x float>, <4 x float>* %a + %ptrs = load <4 x float*>, <4 x float*>* %b + %mask = fcmp oeq <4 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v4f32(<4 x float> %vals, <4 x float*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8f32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8 +; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_512-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]]
+; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_512-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_512-NEXT: ret + %vals = load <8 x float>, <8 x float>* %a + %ptrs = load <8 x float*>, <8 x float*>* %b + %mask = fcmp oeq <8 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v8f32(<8 x float> %vals, <8 x float*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16f32: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16 +; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_1024-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_1024-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x float>, <16 x float>* %a + %ptrs = load <16 x float*>, <16 x float*>* %b + %mask = fcmp oeq <16 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v16f32(<16 x float> %vals, <16 x float*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32f32: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_2048-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x float>, <32 x float>* %a + %ptrs = load <32 x float*>, <32 x float*>* %b + %mask = fcmp oeq <32 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; +; ST1D (float) +; + +; Scalarize 1 x double scatters +define void @masked_scatter_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v1f64: +; CHECK-NOT: ptrue + %vals = load <1 x double>, <1 x double>* %a + %ptrs = load <1 x double*>, <1 x double*>* %b + %mask = fcmp oeq <1 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v1f64(<1 x double> %vals, <1 x double*> %ptrs, i32 8, <1 x i1> %mask) + ret void +} + +define void @masked_scatter_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2f64: +; CHECK: ldr q[[VALS:[0-9]+]], [x0] +; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1] +; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2 +; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0 +; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0 +; CHECK-NEXT: st1d { z[[VALS]].d }, [[MASK]], [z[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <2 x double>, <2 x double>* %a + %ptrs = load <2 x double*>, <2 x double*>* %b + %mask = fcmp oeq <2 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v2f64(<2 x double> %vals, <2 x double*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4f64: +; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; CHECK-NEXT: mov [[ZERO:z[0-9]+]].d, #0 +; CHECK-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]].d +; CHECK-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; CHECK-NEXT: ret + %vals = load <4 x double>, <4 x double>* %a + %ptrs = load <4 x double*>, <4 x double*>* %b + %mask = fcmp oeq <4 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v4f64(<4 x double> %vals, <4 x double*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8f64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_512-NEXT: mov [[ZERO:z[0-9]+]].d, #0 +; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]] +; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_512-NEXT: ret + %vals = load <8 x double>, <8 x double>* %a + %ptrs = load <8 x double*>, <8 x double*>* %b + %mask = fcmp oeq <8 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v8f64(<8 x double> %vals, <8 x double*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16f64: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_1024-NEXT: mov [[ZERO:z[0-9]+]].d, #0 +; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]] +; VBITS_GE_1024-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_1024-NEXT: ret + %vals = load <16 x double>, <16 x double>* %a + %ptrs = load <16 x double*>, <16 x double*>* %b + %mask = fcmp oeq <16 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v16f64(<16 x double> %vals, <16 x double*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32f64: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].d, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, [[ZERO]] +; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x double>, <32 x double>* %a + %ptrs = load <32 x double*>, <32 x double*>* %b + %mask = fcmp oeq <32 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f64(<32 x double> %vals, <32 x double*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; The tests above cover the supported element types; the tests below check +; that the addressing modes still work.
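+; As a rough guide to the expected selection (not itself checked by FileCheck): sign/zero-extended i32 indices pick the 32-bit forms [base, z.s, sxtw|uxtw #shift], i64 indices pick [base, z.d, lsl #shift], and GEPs over i8 pick the corresponding unscaled variants. For example +;   %ext = sext <32 x i32> %idxs to <32 x i64> +;   %ptrs = getelementptr half, half* %base, <32 x i64> %ext +; is expected to select: st1h { z0.s }, p0, [x2, z1.s, sxtw #1]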
+ +define void @masked_scatter_32b_scaled_sext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 { +; CHECK-LABEL: masked_scatter_32b_scaled_sext: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_2048-NEXT: st1h { [[UPK]].s }, [[MASK]], [x2, [[PTRS]].s, sxtw #1] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x half>, <32 x half>* %a + %idxs = load <32 x i32>, <32 x i32>* %b + %ext = sext <32 x i32> %idxs to <32 x i64> + %ptrs = getelementptr half, half* %base, <32 x i64> %ext + %mask = fcmp oeq <32 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +define void @masked_scatter_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 { +; CHECK-LABEL: masked_scatter_32b_scaled_zext: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_2048-NEXT: st1h { [[UPK]].s }, [[MASK]], [x2, [[PTRS]].s, uxtw #1] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x half>, <32 x half>* %a + %idxs = load <32 x i32>, <32 x i32>* %b + %ext = zext <32 x i32> %idxs to <32 x i64> + %ptrs = getelementptr half, half* %base, <32 x i64> %ext + %mask = fcmp oeq <32 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +define void @masked_scatter_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 { +; CHECK-LABEL: masked_scatter_32b_unscaled_sext: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_2048-NEXT: st1h { [[UPK]].s }, [[MASK]], [x2, [[PTRS]].s, sxtw] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x half>, <32 x half>* %a + %idxs = load <32 x i32>, <32 x i32>* %b + %ext = sext <32 x i32> %idxs to <32 x i64> + %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %ext + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x half*> + %mask = fcmp oeq <32 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +define void @masked_scatter_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 { +; CHECK-LABEL: masked_scatter_32b_unscaled_zext: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].h, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[VALS]].h +; VBITS_GE_2048-NEXT: st1h { [[UPK]].s }, [[MASK]], [x2, [[PTRS]].s, uxtw] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x half>, <32 x half>* %a + %idxs = load <32 x i32>, <32 x i32>* %b + %ext = zext <32 x i32> %idxs to <32 x i64> + %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %ext + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x half*> + %mask = fcmp oeq <32 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask) + ret void +}
+ +define void @masked_scatter_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float* %base) #0 { +; CHECK-LABEL: masked_scatter_64b_scaled: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_2048-NEXT: st1w { [[UPK]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #2] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x float>, <32 x float>* %a + %idxs = load <32 x i64>, <32 x i64>* %b + %ptrs = getelementptr float, float* %base, <32 x i64> %idxs + %mask = fcmp oeq <32 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +define void @masked_scatter_64b_unscaled(<32 x float>* %a, <32 x i64>* %b, i8* %base) #0 { +; CHECK-LABEL: masked_scatter_64b_unscaled: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_2048-NEXT: st1w { [[UPK]].d }, [[MASK]], [x2, [[PTRS]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x float>, <32 x float>* %a + %idxs = load <32 x i64>, <32 x i64>* %b + %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %idxs + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*> + %mask = fcmp oeq <32 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; FIXME: This case does not yet codegen well due to deficiencies in opcode selection +define void @masked_scatter_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %off) #0 { +; CHECK-LABEL: masked_scatter_vec_plus_reg: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, x2 +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_2048-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS_ADD]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x float>, <32 x float>* %a + %bases = load <32 x i8*>, <32 x i8*>* %b + %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 %off + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*> + %mask = fcmp oeq <32 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask) + ret void +}
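+; In the case above the splatted offset could presumably be folded into the addressing mode instead, e.g. st1w { z0.d }, p0, [x2, z1.d] with the scalar as the base, and for the immediate case below st1w { z0.d }, p0, [z1.d, #4]. These are sketches of the desired output only; see the FIXME.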
<32 x i1> %mask) + ret void +} + +; FIXME: This case does not yet codegen well due to deficiencies in opcode selection +define void @masked_scatter_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_vec_plus_imm: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32 +; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1] +; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, #4 +; VBITS_GE_2048-NEXT: mov [[ZERO:z[0-9]+]].s, #0 +; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, [[ZERO]] +; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d +; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[VALS]].s +; VBITS_GE_2048-NEXT: st1w { [[UPK]].d }, [[MASK]], {{\[}}[[PTRS_ADD]].d] +; VBITS_GE_2048-NEXT: ret + %vals = load <32 x float>, <32 x float>* %a + %bases = load <32 x i8*>, <32 x i8*>* %b + %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 4 + %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*> + %mask = fcmp oeq <32 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +declare void @llvm.masked.scatter.v2i8(<2 x i8>, <2 x i8*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32i8(<32 x i8>, <32 x i8*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v2i16(<2 x i16>, <2 x i16*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16i16(<16 x i16>, <16 x i16*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32i16(<32 x i16>, <32 x i16*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v2i32(<2 x i32>, <2 x i32*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i32(<8 x i32>, <8 x i32*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16i32(<16 x i32>, <16 x i32*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32i32(<32 x i32>, <32 x i32*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v1i64(<1 x i64>, <1 x i64*>, i32, <1 x i1>) +declare void @llvm.masked.scatter.v2i64(<2 x i64>, <2 x i64*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i64(<4 x i64>, <4 x i64*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i64(<8 x i64>, <8 x i64*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16i64(<16 x i64>, <16 x i64*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32i64(<32 x i64>, <32 x i64*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v2f16(<2 x half>, <2 x half*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4f16(<4 x half>, <4 x half*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8f16(<8 x half>, <8 x half*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16f16(<16 x half>, <16 x half*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32f16(<32 x half>, <32 x half*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v2f32(<2 x float>, <2 x float*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4f32(<4 x float>, <4 x float*>, i32, <4 x i1>) 
+declare void @llvm.masked.scatter.v8f32(<8 x float>, <8 x float*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32f32(<32 x float>, <32 x float*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v1f64(<1 x double>, <1 x double*>, i32, <1 x i1>) +declare void @llvm.masked.scatter.v2f64(<2 x double>, <2 x double*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4f64(<4 x double>, <4 x double*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8f64(<8 x double>, <8 x double*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16f64(<16 x double>, <16 x double*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32f64(<32 x double>, <32 x double*>, i32, <32 x i1>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll @@ -91,9 +91,7 @@ ; CHECK-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0] ; CHECK-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1] ; CHECK-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; CHECK-NEXT: mov [[Z2:z[0-9]+]].s, [[PG1]]/z, #-1 -; CHECK-NEXT: cmpne [[PG2:p[0-9]+]].s, [[PG0]]/z, [[Z2]].s, #0 -; CHECK-NEXT: st1w { z0.s }, [[PG2]], [x{{[0-9]+}}] +; CHECK-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}] ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %ap %b = load <8 x float>, <8 x float>* %bp @@ -108,9 +106,7 @@ ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0] ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1] ; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_512-NEXT: mov [[Z2:z[0-9]+]].s, [[PG1]]/z, #-1 -; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].s, [[PG0]]/z, [[Z1]].s, #0 -; VBITS_GE_512-NEXT: st1w { z0.s }, [[PG2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}] ; VBITS_GE_512-NEXT: ret %a = load <16 x float>, <16 x float>* %ap %b = load <16 x float>, <16 x float>* %bp @@ -125,9 +121,7 @@ ; VBITS_GE_1024-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0] ; VBITS_GE_1024-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1] ; VBITS_GE_1024-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_1024-NEXT: mov [[Z1:z[0-9]+]].s, [[PG1]]/z, #-1 -; VBITS_GE_1024-NEXT: cmpne [[PG2:p[0-9]+]].s, [[PG0]]/z, [[Z1]].s, #0 -; VBITS_GE_1024-NEXT: st1w { z0.s }, [[PG2]], [x{{[0-9]+}}] +; VBITS_GE_1024-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}] ; VBITS_GE_1024-NEXT: ret %a = load <32 x float>, <32 x float>* %ap %b = load <32 x float>, <32 x float>* %bp @@ -142,9 +136,7 @@ ; VBITS_GE_2048-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0] ; VBITS_GE_2048-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1] ; VBITS_GE_2048-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s -; VBITS_GE_2048-NEXT: mov [[Z1:z[0-9]+]].s, [[PG1]]/z, #-1 -; VBITS_GE_2048-NEXT: cmpne [[PG2:p[0-9]+]].s, [[PG0]]/z, [[Z1]].s, #0 -; VBITS_GE_2048-NEXT: st1w { z0.s }, [[PG2]], [x{{[0-9]+}}] +; VBITS_GE_2048-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}] ; VBITS_GE_2048-NEXT: ret %a = load <64 x float>, <64 x float>* %ap %b = load <64 x float>, <64 x float>* %bp diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather.ll --- a/llvm/test/CodeGen/AArch64/sve-masked-gather.ll +++ 
b/llvm/test/CodeGen/AArch64/sve-masked-gather.ll @@ -106,6 +106,27 @@ ret <vscale x 2 x i64> %vals.sext } +define <vscale x 2 x i64> @masked_gather_passthru(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
+; CHECK-LABEL: masked_gather_passthru:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT: ret
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
+ %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %vals.sext
+}
+
+define <vscale x 2 x i64> @masked_gather_passthru_0(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_gather_passthru_0:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ret
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> zeroinitializer)
+ %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %vals.sext
+}
+
 declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
 declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
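For reference, below is a minimal sketch (not part of the patch) of the kind of fixed-length IR the new ISD::MGATHER custom lowering is meant to handle, in the same style as the scatter tests above; the function name and concrete types are hypothetical, and it assumes the same "+sve" attribute group used by those tests.

; Hypothetical example: a v8f32 masked gather with a zero passthru. With SVE
; enabled and a sufficient register width, the new lowering should select a
; single predicated gather load instead of scalarizing the accesses.
define <8 x float> @example_masked_gather(<8 x float*>* %pp, <8 x i1> %mask) #0 {
  %ptrs = load <8 x float*>, <8 x float*>* %pp
  %vals = call <8 x float> @llvm.masked.gather.v8f32(<8 x float*> %ptrs, i32 4, <8 x i1> %mask, <8 x float> zeroinitializer)
  ret <8 x float> %vals
}

declare <8 x float> @llvm.masked.gather.v8f32(<8 x float*>, i32, <8 x i1>, <8 x float>)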