Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9390,13 +9390,13 @@
 }
 
 // Fold sext/zext of index into index type.
-bool refineIndexType(MaskedScatterSDNode *MSC, SDValue &Index, bool Scaled,
-                     SelectionDAG &DAG) {
+bool refineIndexType(MaskedGatherScatterSDNode *MGS, SDValue &Index,
+                     bool Scaled, SelectionDAG &DAG) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SDValue Op = Index.getOperand(0);
 
   if (Index.getOpcode() == ISD::ZERO_EXTEND) {
-    MSC->setIndexType(Scaled ? ISD::UNSIGNED_SCALED : ISD::UNSIGNED_UNSCALED);
+    MGS->setIndexType(Scaled ? ISD::UNSIGNED_SCALED : ISD::UNSIGNED_UNSCALED);
     if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
       Index = Op;
       return true;
@@ -9404,7 +9404,7 @@
   }
 
   if (Index.getOpcode() == ISD::SIGN_EXTEND) {
-    MSC->setIndexType(Scaled ? ISD::SIGNED_SCALED : ISD::SIGNED_UNSCALED);
+    MGS->setIndexType(Scaled ? ISD::SIGNED_SCALED : ISD::SIGNED_UNSCALED);
     if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
       Index = Op;
       return true;
@@ -9473,11 +9473,32 @@
 SDValue DAGCombiner::visitMGATHER(SDNode *N) {
   MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
   SDValue Mask = MGT->getMask();
+  SDValue Chain = MGT->getChain();
+  SDValue Index = MGT->getIndex();
+  SDValue Scale = MGT->getScale();
+  SDValue PassThru = MGT->getPassThru();
+  SDValue BasePtr = MGT->getBasePtr();
   SDLoc DL(N);
 
   // Zap gathers with a zero mask.
   if (ISD::isBuildVectorAllZeros(Mask.getNode()))
-    return CombineTo(N, MGT->getPassThru(), MGT->getChain());
+    return CombineTo(N, PassThru, MGT->getChain());
+
+  if (refineUniformBase(BasePtr, Index, DAG)) {
+    SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
+    return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
+                               PassThru.getValueType(), DL, Ops,
+                               MGT->getMemOperand(), MGT->getIndexType(),
+                               MGT->getExtensionType());
+  }
+
+  if (refineIndexType(MGT, Index, MGT->isIndexScaled(), DAG)) {
+    SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
+    return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
+                               PassThru.getValueType(), DL, Ops,
+                               MGT->getMemOperand(), MGT->getIndexType(),
+                               MGT->getExtensionType());
+  }
 
   return SDValue();
 }
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7361,17 +7361,22 @@
     return SDValue(E, 0);
   }
 
+  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
   auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, ExtTy, VT, MMO, IndexType);
   createOperands(N, Ops);
 
   assert(N->getPassThru().getValueType() == N->getValueType(0) &&
          "Incompatible type of the PassThru value in MaskedGatherSDNode");
-  assert(N->getMask().getValueType().getVectorNumElements() ==
-             N->getValueType(0).getVectorNumElements() &&
+  assert(N->getMask().getValueType().getVectorElementCount() ==
+             N->getValueType(0).getVectorElementCount() &&
          "Vector width mismatch between mask and data");
-  assert(N->getIndex().getValueType().getVectorNumElements() >=
-             N->getValueType(0).getVectorNumElements() &&
+  assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
+             N->getValueType(0).getVectorElementCount().isScalable() &&
+         "Scalable flags of index and data do not match");
+  assert(ElementCount::isKnownGE(
+             N->getIndex().getValueType().getVectorElementCount(),
+             N->getValueType(0).getVectorElementCount()) &&
          "Vector width mismatch between index and data");
   assert(isa<ConstantSDNode>(N->getScale()) &&
          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4408,7 +4408,7 @@
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
-    IndexType = ISD::SIGNED_SCALED;
+    IndexType = ISD::SIGNED_UNSCALED;
     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
   }
   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -807,6 +807,7 @@
 
   SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1003,6 +1003,7 @@
       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
+      setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
       setOperationAction(ISD::MUL, VT, Custom);
       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
@@ -1055,6 +1056,7 @@
                     MVT::nxv4f32, MVT::nxv2f64}) {
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+      setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
       setOperationAction(ISD::SELECT, VT, Custom);
@@ -3720,6 +3722,29 @@
   return ExtVal.getValueType().isScalableVector();
 }
 
+unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
+  std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
+      {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
+       AArch64ISD::GLD1_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true),
+       AArch64ISD::GLD1_UXTW_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false),
+       AArch64ISD::GLD1_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true),
+       AArch64ISD::GLD1_SXTW_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false),
+       AArch64ISD::GLD1_SCALED_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true),
+       AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false),
+       AArch64ISD::GLD1_SCALED_MERGE_ZERO},
+      {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true),
+       AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO},
+  };
+  auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend);
+  return AddrModes.find(Key)->second;
+}
+
 unsigned getScatterVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
   std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
@@ -3743,7 +3768,7 @@
   return AddrModes.find(Key)->second;
 }
 
-bool getScatterIndexIsExtended(SDValue Index) {
+bool getGatherScatterIndexIsExtended(SDValue Index) {
   unsigned Opcode = Index.getOpcode();
   if (Opcode == ISD::SIGN_EXTEND_INREG)
     return true;
@@ -3761,6 +3786,58 @@
   return false;
 }
 
+SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
+  assert(MGT && "Can only custom lower gather load nodes");
+
+  SDValue Index = MGT->getIndex();
+  SDValue Chain = MGT->getChain();
+  SDValue PassThru = MGT->getPassThru();
+  SDValue Mask = MGT->getMask();
+  SDValue BasePtr = MGT->getBasePtr();
+
+  ISD::MemIndexType IndexType = MGT->getIndexType();
+  bool IsScaled =
+      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::UNSIGNED_SCALED;
+  bool IsSigned =
+      IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED;
+  bool NeedsExtend =
+      getGatherScatterIndexIsExtended(Index) ||
+      Index.getSimpleValueType().getVectorElementType() == MVT::i32;
+
+  EVT VT = PassThru.getSimpleValueType();
+  EVT MemVT = MGT->getMemoryVT();
+  SDValue InputVT = DAG.getValueType(MemVT);
+
+  if (VT.getVectorElementType() == MVT::bf16 &&
+      !static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasBF16())
+    return SDValue();
+
+  // Handle FP data
+  if (VT.isFloatingPoint()) {
+    VT = VT.changeVectorElementTypeToInteger();
+    ElementCount EC = VT.getVectorElementCount();
+    auto ScalarIntVT =
+        MVT::getIntegerVT(AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
+    PassThru = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL,
+                           MVT::getVectorVT(ScalarIntVT, EC), PassThru);
+
+    InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
+  }
+
+  SDVTList VTs = DAG.getVTList(PassThru.getSimpleValueType(), MVT::Other);
+
+  if (getGatherScatterIndexIsExtended(Index))
+    Index = Index.getOperand(0);
+
+  unsigned Opcode = getGatherVecOpcode(IsScaled, IsSigned, NeedsExtend);
+
+  SDValue Ops[] = {Chain, Mask, BasePtr, Index, InputVT, PassThru};
+  return DAG.getNode(Opcode, DL, VTs, Ops);
+}
+
 SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
                                              SelectionDAG &DAG) const {
   SDLoc DL(Op);
@@ -3779,7 +3856,7 @@
   bool IsSigned =
       IndexType == ISD::SIGNED_SCALED || IndexType == ISD::SIGNED_UNSCALED;
   bool NeedsExtend =
-      getScatterIndexIsExtended(Index) ||
+      getGatherScatterIndexIsExtended(Index) ||
       Index.getSimpleValueType().getVectorElementType() == MVT::i32;
 
   EVT VT = StoreVal.getSimpleValueType();
@@ -3803,7 +3880,7 @@
     InputVT = DAG.getValueType(MemVT.changeVectorElementTypeToInteger());
   }
 
-  if (getScatterIndexIsExtended(Index))
+  if (getGatherScatterIndexIsExtended(Index))
     Index = Index.getOperand(0);
 
   SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, InputVT};
@@ -4088,6 +4165,8 @@
     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::STORE:
     return LowerSTORE(Op, DAG);
+  case ISD::MGATHER:
+    return LowerMGATHER(Op, DAG);
   case ISD::MSCATTER:
     return LowerMSCATTER(Op, DAG);
   case ISD::VECREDUCE_SEQ_FADD:
Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
@@ -0,0 +1,158 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; unscaled unpacked
32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv2i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, i16* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, i32* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i64(i64* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3] +; CHECK-NEXT: ret + %ptrs = getelementptr i64, i64* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f16(half* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1] +; CHECK-NEXT: ret + %ptrs = getelementptr half, half* %base, %offsets + %vals = call @llvm.masked.gather.nxv2f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f32(float* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2] +; CHECK-NEXT: ret + %ptrs = getelementptr float, float* %base, %offsets + %vals = call @llvm.masked.gather.nxv2f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f64(double* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3] +; CHECK-NEXT: ret + %ptrs = getelementptr double, double* %base, %offsets + %vals = call @llvm.masked.gather.nxv2f64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv2i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, i16* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, i32* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled packed 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv4i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, i16* %base, %offsets + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv4i32(i32* %base, 
%offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, i32* %base, %offsets + %vals = call @llvm.masked.gather.nxv4i32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f16(half* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + %ptrs = getelementptr half, half* %base, %offsets + %vals = call @llvm.masked.gather.nxv4f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f32(float* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2] +; CHECK-NEXT: ret + %ptrs = getelementptr float, float* %base, %offsets + %vals = call @llvm.masked.gather.nxv4f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv4i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, i16* %base, %offsets + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +declare @llvm.masked.gather.nxv2i16(, i32, , ) +declare @llvm.masked.gather.nxv2i32(, i32, , ) +declare @llvm.masked.gather.nxv2i64(, i32, , ) +declare @llvm.masked.gather.nxv2f16(, i32, , ) +declare @llvm.masked.gather.nxv2f32(, i32, , ) +declare @llvm.masked.gather.nxv2f64(, i32, , ) + +declare @llvm.masked.gather.nxv4i16(, i32, , ) +declare @llvm.masked.gather.nxv4i32(, i32, , ) +declare @llvm.masked.gather.nxv4f16(, i32, , ) +declare @llvm.masked.gather.nxv4f32(, i32, , ) Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll @@ -0,0 +1,217 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled unpacked 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv2i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, i8* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i8( %ptrs, i32 1, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.zext = zext 
%vals to + ret %vals.zext +} + +define @masked_gather_nxv2i64(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f64(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv2i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, i8* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i8( %ptrs, i32 1, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled packed 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv4i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, i8* %base, %offsets + %vals = call @llvm.masked.gather.nxv4i8( %ptrs, i32 1, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv4i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, 
i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv4i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4i32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv4i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, i8* %base, %offsets + %vals = call @llvm.masked.gather.nxv4i8( %ptrs, i32 1, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv4i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +declare @llvm.masked.gather.nxv2i8(, i32, , ) +declare @llvm.masked.gather.nxv2i16(, i32, , ) +declare @llvm.masked.gather.nxv2i32(, i32, , ) +declare @llvm.masked.gather.nxv2i64(, i32, , ) +declare @llvm.masked.gather.nxv2f16(, i32, , ) +declare @llvm.masked.gather.nxv2f32(, i32, , ) +declare @llvm.masked.gather.nxv2f64(, i32, , ) + +declare @llvm.masked.gather.nxv4i8(, i32, , ) +declare @llvm.masked.gather.nxv4i16(, i32, , ) +declare @llvm.masked.gather.nxv4i32(, i32, , ) +declare @llvm.masked.gather.nxv4f16(, i32, , ) +declare @llvm.masked.gather.nxv4f32(, i32, , ) Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll @@ -0,0 +1,171 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled unpacked 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv2i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i16, i16* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define 
@masked_gather_nxv2i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i32, i32* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i64(i64* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i64, i64* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f16(half* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr half, half* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f32(float* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr float, float* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f64(double* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr double, double* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2f64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv2i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i16, i16* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i32, i32* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled packed 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv4i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i16, i16* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv4i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2] +; CHECK-NEXT: ret + 
%offsets.zext = zext %offsets to + %ptrs = getelementptr i32, i32* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv4i32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f16(half* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr half, half* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv4f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f32(float* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr float, float* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv4f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv4i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i16, i16* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +declare @llvm.masked.gather.nxv2i16(, i32, , ) +declare @llvm.masked.gather.nxv2i32(, i32, , ) +declare @llvm.masked.gather.nxv2i64(, i32, , ) +declare @llvm.masked.gather.nxv2f16(, i32, , ) +declare @llvm.masked.gather.nxv2f32(, i32, , ) +declare @llvm.masked.gather.nxv2f64(, i32, , ) + +declare @llvm.masked.gather.nxv4i16(, i32, , ) +declare @llvm.masked.gather.nxv4i32(, i32, , ) +declare @llvm.masked.gather.nxv4f16(, i32, , ) +declare @llvm.masked.gather.nxv4f32(, i32, , ) Index: llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll @@ -0,0 +1,234 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled unpacked 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv2i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i8, i8* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i8( %ptrs, i32 1, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, 
%offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i64(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f64(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv2i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i8, i8* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv2i8( %ptrs, i32 1, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; unscaled packed 32-bit offsets +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @masked_gather_nxv4i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i8, i8* %base, 
%offsets.zext + %vals = call @llvm.masked.gather.nxv4i8( %ptrs, i32 1, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv4i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv4i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4i32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv4f32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv4i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %ptrs = getelementptr i8, i8* %base, %offsets.zext + %vals = call @llvm.masked.gather.nxv4i8( %ptrs, i32 1, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv4i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %offsets.zext = zext %offsets to + %byte_ptrs = getelementptr i8, i8* %base, %offsets.zext + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv4i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +declare @llvm.masked.gather.nxv2i8(, i32, , ) +declare @llvm.masked.gather.nxv2i16(, i32, , ) +declare @llvm.masked.gather.nxv2i32(, i32, , ) +declare @llvm.masked.gather.nxv2i64(, i32, , ) +declare @llvm.masked.gather.nxv2f16(, i32, , ) +declare @llvm.masked.gather.nxv2f32(, i32, , ) +declare @llvm.masked.gather.nxv2f64(, i32, , ) + +declare @llvm.masked.gather.nxv4i8(, i32, , ) +declare @llvm.masked.gather.nxv4i16(, i32, , ) +declare @llvm.masked.gather.nxv4i32(, i32, , ) +declare @llvm.masked.gather.nxv4f16(, i32, , ) +declare @llvm.masked.gather.nxv4f32(, i32, , ) Index: llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll @@ -0,0 +1,93 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc 
-mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s + +define @masked_gather_nxv2i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, i16* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, i32* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i64(i64* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3] +; CHECK-NEXT: ret + %ptrs = getelementptr i64, i64* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f16(half* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1] +; CHECK-NEXT: ret + %ptrs = getelementptr half, half* %base, %offsets + %vals = call @llvm.masked.gather.nxv2f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f32(float* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2] +; CHECK-NEXT: ret + %ptrs = getelementptr float, float* %base, %offsets + %vals = call @llvm.masked.gather.nxv2f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f64(double* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3] +; CHECK-NEXT: ret + %ptrs = getelementptr double, double* %base, %offsets + %vals.sext = call @llvm.masked.gather.nxv2f64( %ptrs, i32 8, %mask, undef) + ret %vals.sext +} + +define @masked_sgather_nxv2i16(i16* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1] +; CHECK-NEXT: ret + %ptrs = getelementptr i16, i16* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i32(i32* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2] +; CHECK-NEXT: ret + %ptrs = getelementptr i32, i32* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +declare @llvm.masked.gather.nxv2i16(, i32, , ) +declare @llvm.masked.gather.nxv2i32(, i32, , ) +declare @llvm.masked.gather.nxv2i64(, i32, , ) +declare @llvm.masked.gather.nxv2f16(, i32, , ) +declare @llvm.masked.gather.nxv2f32(, i32, , ) +declare @llvm.masked.gather.nxv2f64(, i32, , ) Index: llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll @@ -0,0 +1,124 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve 
< %s | FileCheck %s + +define @masked_gather_nxv2i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, i8* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i8( %ptrs, i32 1, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i32( %ptrs, i32 4, %mask, undef) + %vals.zext = zext %vals to + ret %vals.zext +} + +define @masked_gather_nxv2i64(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f16( %ptrs, i32 2, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f32( %ptrs, i32 4, %mask, undef) + ret %vals +} + +define @masked_gather_nxv2f64(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_gather_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2f64( %ptrs, i32 8, %mask, undef) + ret %vals +} + +define @masked_sgather_nxv2i8(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %ptrs = getelementptr i8, i8* %base, %offsets + %vals = call @llvm.masked.gather.nxv2i8( %ptrs, i32 1, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i16(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %byte_ptrs = getelementptr i8, i8* %base, %offsets + %ptrs = bitcast %byte_ptrs to + %vals = call @llvm.masked.gather.nxv2i16( %ptrs, i32 2, %mask, undef) + %vals.sext = sext %vals to + ret %vals.sext +} + +define @masked_sgather_nxv2i32(i8* %base, %offsets, %mask) { +; CHECK-LABEL: masked_sgather_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + 
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %vals.sext
+}
+
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
Index: llvm/test/CodeGen/X86/masked_gather_scatter.ll
===================================================================
--- llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -765,45 +765,41 @@
 define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
 ; KNL_64-LABEL: test14:
 ; KNL_64:       # %bb.0:
-; KNL_64-NEXT:    vpbroadcastq %xmm0, %zmm0
-; KNL_64-NEXT:    vmovd %esi, %xmm1
-; KNL_64-NEXT:    vpbroadcastd %xmm1, %ymm1
-; KNL_64-NEXT:    vpmovsxdq %ymm1, %zmm1
-; KNL_64-NEXT:    vpsllq $2, %zmm1, %zmm1
-; KNL_64-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; KNL_64-NEXT:    vmovq %xmm0, %rax
+; KNL_64-NEXT:    vmovd %esi, %xmm0
+; KNL_64-NEXT:    vpbroadcastd %xmm0, %ymm0
+; KNL_64-NEXT:    vpmovsxdq %ymm0, %zmm0
+; KNL_64-NEXT:    vpsllq $2, %zmm0, %zmm0
 ; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
-; KNL_64-NEXT:    vgatherqps (,%zmm0), %ymm1 {%k1}
+; KNL_64-NEXT:    vgatherqps (%rax,%zmm0), %ymm1 {%k1}
 ; KNL_64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm1, %zmm0
 ; KNL_64-NEXT:    retq
 ;
 ; KNL_32-LABEL: test14:
 ; KNL_32:       # %bb.0:
-; KNL_32-NEXT:    vpbroadcastd %xmm0, %zmm0
+; KNL_32-NEXT:    vmovd %xmm0, %eax
 ; KNL_32-NEXT:    vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
-; KNL_32-NEXT:    vpaddd %zmm1, %zmm0, %zmm1
 ; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
-; KNL_32-NEXT:    vgatherdps (,%zmm1), %zmm0 {%k1}
+; KNL_32-NEXT:    vgatherdps (%eax,%zmm1), %zmm0 {%k1}
 ; KNL_32-NEXT:    retl
 ;
 ; SKX-LABEL: test14:
 ; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq %xmm0, %zmm0
-; SKX-NEXT:    vpbroadcastd %esi, %ymm1
-; SKX-NEXT:    vpmovsxdq %ymm1, %zmm1
-; SKX-NEXT:    vpsllq $2, %zmm1, %zmm1
-; SKX-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; SKX-NEXT:    vmovq %xmm0, %rax
+; SKX-NEXT:    vpbroadcastd %esi, %ymm0
+; SKX-NEXT:    vpmovsxdq %ymm0, %zmm0
+; SKX-NEXT:    vpsllq $2, %zmm0, %zmm0
 ; SKX-NEXT:    kxnorw %k0, %k0, %k1
-; SKX-NEXT:    vgatherqps (,%zmm0), %ymm1 {%k1}
+; SKX-NEXT:    vgatherqps (%rax,%zmm0), %ymm1 {%k1}
 ; SKX-NEXT:    vinsertf64x4 $1, %ymm1, %zmm1, %zmm0
 ; SKX-NEXT:    retq
 ;
 ; SKX_32-LABEL: test14:
 ; SKX_32:       # %bb.0:
-; SKX_32-NEXT:    vpbroadcastd %xmm0, %zmm0
+; SKX_32-NEXT:    vmovd %xmm0, %eax
 ; SKX_32-NEXT:    vpslld $2, {{[0-9]+}}(%esp){1to16}, %zmm1
-; SKX_32-NEXT:    vpaddd %zmm1, %zmm0, %zmm1
 ; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
-; SKX_32-NEXT:    vgatherdps (,%zmm1), %zmm0 {%k1}
+; SKX_32-NEXT:    vgatherdps (%eax,%zmm1), %zmm0 {%k1}
 ; SKX_32-NEXT:    retl
   %broadcast.splatinsert = insertelement <16 x float*> %vec, float* %base, i32 1
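
For context, the following is a minimal, illustrative IR sketch (not part of the patch; the function name @example_gather is hypothetical) of the kind of scalable-vector gather this change lets the AArch64 backend select directly. It mirrors the masked_gather_nxv4i32 case in the new sve-masked-gather-32b-signed-scaled.ll test, where a single base register plus sign-extended, scaled 32-bit offsets is expected to become one LD1W.

; Gather of <vscale x 4 x i32> from a uniform base plus 32-bit signed offsets.
define <vscale x 4 x i32> @example_gather(i32* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i32> %offsets
  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %vals
}

declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

With llc -mtriple=aarch64--linux-gnu -mattr=+sve this is expected to lower to a single "ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]", as checked by the corresponding test added in this patch.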