Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -196,7 +196,7 @@
   UUNPKHI,
   UUNPKLO,
 
-  // Unsigned gather loads.
+  // Unsigned gather loads
   GLD1,
   GLD1_SCALED,
   GLD1_UXTW,
@@ -205,6 +205,15 @@
   GLD1_SXTW_SCALED,
   GLD1_IMM,
 
+  // Signed gather loads
+  GLD1S,
+  GLD1S_SCALED,
+  GLD1S_UXTW,
+  GLD1S_SXTW,
+  GLD1S_UXTW_SCALED,
+  GLD1S_SXTW_SCALED,
+  GLD1S_IMM,
+
   // NEON Load/Store with post-increment base updates
   LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
   LD3post,
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -614,6 +614,7 @@
   setTargetDAGCombine(ISD::ANY_EXTEND);
   setTargetDAGCombine(ISD::ZERO_EXTEND);
   setTargetDAGCombine(ISD::SIGN_EXTEND);
+  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
   setTargetDAGCombine(ISD::BITCAST);
   setTargetDAGCombine(ISD::CONCAT_VECTORS);
   setTargetDAGCombine(ISD::STORE);
@@ -828,6 +829,15 @@
       if (isTypeLegal(VT) && VT.getVectorElementType() != MVT::i1)
         setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
     }
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv2i64, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv2i32, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv2i16, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv2i8, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv4i32, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv4i16, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv4i8, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv8i16, Legal);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::nxv8i8, Legal);
   }
 
   PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
@@ -1340,6 +1350,13 @@
   case AArch64ISD::GLD1_SXTW_SCALED:  return "AArch64ISD::GLD1_SXTW_SCALED";
   case AArch64ISD::GLD1_UXTW_SCALED:  return "AArch64ISD::GLD1_UXTW_SCALED";
   case AArch64ISD::GLD1_IMM:          return "AArch64ISD::GLD1_IMM";
+  case AArch64ISD::GLD1S:             return "AArch64ISD::GLD1S";
+  case AArch64ISD::GLD1S_SCALED:      return "AArch64ISD::GLD1S_SCALED";
+  case AArch64ISD::GLD1S_SXTW:        return "AArch64ISD::GLD1S_SXTW";
+  case AArch64ISD::GLD1S_UXTW:        return "AArch64ISD::GLD1S_UXTW";
+  case AArch64ISD::GLD1S_SXTW_SCALED: return "AArch64ISD::GLD1S_SXTW_SCALED";
+  case AArch64ISD::GLD1S_UXTW_SCALED: return "AArch64ISD::GLD1S_UXTW_SCALED";
+  case AArch64ISD::GLD1S_IMM:         return "AArch64ISD::GLD1S_IMM";
   }
   return nullptr;
 }
@@ -9872,14 +9889,72 @@
   return SDValue();
 }
 
+static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
+  if (!MemVT.getVectorElementType().isSimple())
+    return false;
+
+  uint64_t MaskForTy = 0ull;
+  switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
+  case MVT::i8:
+    MaskForTy = 0xffull;
+    break;
+  case MVT::i16:
+    MaskForTy = 0xffffull;
+    break;
+  case MVT::i32:
+    MaskForTy = 0xffffffffull;
+    break;
+  default:
+    return false;
+  }
+
+  if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
+    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
+      return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
+
+  return false;
+}
+
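+// Illustrative sketch of the fold performed by performSVEAndCombine below;
+// it mirrors the redundant masks removed from the gather-load tests in this
+// patch:
+//   t1: nxv2i64 = GLD1 chain, pred, base, offsets, nxv2i16  ; zero-extends
+//   t2: nxv2i64 = and t1, (splat_vector 0xffff)
+// The mask only keeps bits the gather already zero-extended, so t2 folds to t1.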
+static SDValue performSVEAndCombine(SDNode *N,
+                                    TargetLowering::DAGCombinerInfo &DCI,
+                                    const AArch64Subtarget *Subtarget) {
+  assert(N->getOpcode() == ISD::AND && "Expected ISD::AND");
+  assert(N->getValueType(0).isScalableVector() && "Expected SVE vector");
+
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  SDValue Src = N->getOperand(0);
+  SDValue Mask = N->getOperand(1);
+  unsigned Opc = Src->getOpcode();
+
+  // These instructions perform an implicit zero-extend.
+  if ((Opc == AArch64ISD::GLD1) || (Opc == AArch64ISD::GLD1_SCALED) ||
+      (Opc == AArch64ISD::GLD1_SXTW) || (Opc == AArch64ISD::GLD1_SXTW_SCALED) ||
+      (Opc == AArch64ISD::GLD1_UXTW) || (Opc == AArch64ISD::GLD1_UXTW_SCALED) ||
+      (Opc == AArch64ISD::GLD1_IMM)) {
+    EVT MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
+
+    if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
+      return Src;
+  }
+
+  return SDValue();
+}
+
 static SDValue performANDCombine(SDNode *N,
-                                 TargetLowering::DAGCombinerInfo &DCI) {
+                                 TargetLowering::DAGCombinerInfo &DCI,
+                                 const AArch64Subtarget *Subtarget) {
   SelectionDAG &DAG = DCI.DAG;
   SDValue LHS = N->getOperand(0);
   EVT VT = N->getValueType(0);
   if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
     return SDValue();
 
+  if (VT.isScalableVector())
+    return performSVEAndCombine(N, DCI, Subtarget);
+
   BuildVectorSDNode *BVN =
       dyn_cast<BuildVectorSDNode>(N->getOperand(1).getNode());
   if (!BVN)
@@ -11833,6 +11908,68 @@
   return DAG.getMergeValues({Load, LoadChain}, DL);
 }
 
+static SDValue
+performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                              SelectionDAG &DAG) {
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  SDValue Src = N->getOperand(0);
+  unsigned Opc = Src->getOpcode();
+
+  // The following SVE opcodes have sign-extending equivalents, which makes
+  // them perfect candidates for this DAG combine, e.g. AArch64ISD::GLD1 +
+  // SIGN_EXTEND_INREG = AArch64ISD::GLD1S. For other opcodes bail out.
+  if ((Opc != AArch64ISD::GLD1) && (Opc != AArch64ISD::GLD1_SCALED) &&
+      (Opc != AArch64ISD::GLD1_SXTW) && (Opc != AArch64ISD::GLD1_SXTW_SCALED) &&
+      (Opc != AArch64ISD::GLD1_UXTW) && (Opc != AArch64ISD::GLD1_UXTW_SCALED) &&
+      (Opc != AArch64ISD::GLD1_IMM))
+    return SDValue();
+
+  EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+  EVT GLD1SrcMemVT = cast<VTSDNode>(N->getOperand(0)->getOperand(4))->getVT();
+
+  if ((SignExtSrcVT != GLD1SrcMemVT) || !Src.hasOneUse())
+    return SDValue();
+
+  EVT DstVT = N->getValueType(0);
+  SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
+  SDValue Ops[] = {Src->getOperand(0), Src->getOperand(1), Src->getOperand(2),
+                   Src->getOperand(3), Src->getOperand(4)};
+
+  unsigned NewOpc;
+  switch (Opc) {
+  case AArch64ISD::GLD1:
+    NewOpc = AArch64ISD::GLD1S;
+    break;
+  case AArch64ISD::GLD1_SCALED:
+    NewOpc = AArch64ISD::GLD1S_SCALED;
+    break;
+  case AArch64ISD::GLD1_SXTW:
+    NewOpc = AArch64ISD::GLD1S_SXTW;
+    break;
+  case AArch64ISD::GLD1_SXTW_SCALED:
+    NewOpc = AArch64ISD::GLD1S_SXTW_SCALED;
+    break;
+  case AArch64ISD::GLD1_UXTW:
+    NewOpc = AArch64ISD::GLD1S_UXTW;
+    break;
+  case AArch64ISD::GLD1_UXTW_SCALED:
+    NewOpc = AArch64ISD::GLD1S_UXTW_SCALED;
+    break;
+  case AArch64ISD::GLD1_IMM:
+    NewOpc = AArch64ISD::GLD1S_IMM;
+    break;
+  }
+
+  SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
+  DCI.CombineTo(N, ExtLoad);
+  DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
+
+  // Return N so it doesn't get rechecked!
+  return SDValue(N, 0);
+}
+
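+// Illustrative sketch of the sign-extending counterpart handled by
+// performSignExtendInRegCombine above (taken from the new ld1sh/ld1sw gather
+// tests in this patch):
+//   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(...)
+//   %res  = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+// now selects to a single "ld1sh { z0.d }, p0/z, [x0, z0.d]" via GLD1S.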
 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -11858,7 +11995,7 @@
   case ISD::OR:
     return performORCombine(N, DCI, Subtarget);
   case ISD::AND:
-    return performANDCombine(N, DCI);
+    return performANDCombine(N, DCI, Subtarget);
   case ISD::SRL:
     return performSRLCombine(N, DCI);
   case ISD::INTRINSIC_WO_CHAIN:
@@ -11867,6 +12004,8 @@
   case ISD::ZERO_EXTEND:
   case ISD::SIGN_EXTEND:
     return performExtendCombine(N, DCI, DAG);
+  case ISD::SIGN_EXTEND_INREG:
+    return performSignExtendInRegCombine(N, DCI, DAG);
   case ISD::BITCAST:
     return performBitcastCombine(N, DCI, DAG);
   case ISD::CONCAT_VECTORS:
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -28,6 +28,14 @@
 def AArch64ld1_gather_sxtw_scaled  : SDNode<"AArch64ISD::GLD1_SXTW_SCALED",  SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1_gather_imm          : SDNode<"AArch64ISD::GLD1_IMM",          SDT_AArch64_GLD1_IMM, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 
+def AArch64ld1s_gather             : SDNode<"AArch64ISD::GLD1S",             SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_scaled      : SDNode<"AArch64ISD::GLD1S_SCALED",      SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_uxtw        : SDNode<"AArch64ISD::GLD1S_UXTW",        SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_sxtw        : SDNode<"AArch64ISD::GLD1S_SXTW",        SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_uxtw_scaled : SDNode<"AArch64ISD::GLD1S_UXTW_SCALED", SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_sxtw_scaled : SDNode<"AArch64ISD::GLD1S_SXTW_SCALED", SDT_AArch64_GLD1,     [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_imm         : SDNode<"AArch64ISD::GLD1S_IMM",         SDT_AArch64_GLD1_IMM, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+
 let Predicates = [HasSVE] in {
   def RDFFR_PPz  : sve_int_rdffr_pred<0b0, "rdffr">;
 
@@ -420,11 +428,11 @@
 
   // Gathers using unscaled 32-bit offsets, e.g.
   // ld1h z0.s, p0/z, [x0, z0.s, uxtw]
-  defm GLD1SB_S   : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb",   null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
+  defm GLD1SB_S   : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb",   AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
   defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
   defm GLD1B_S    : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
   defm GLDFF1B_S  : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b",  null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
-  defm GLD1SH_S   : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
+  defm GLD1SH_S   : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh",   AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
   defm GLDFF1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
   defm GLD1H_S    : sve_mem_32b_gld_vs_32_unscaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
   defm GLDFF1H_S  : sve_mem_32b_gld_vs_32_unscaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
@@ -433,7 +441,7 @@
 
   // Gathers using scaled 32-bit offsets, e.g.
   // ld1h z0.s, p0/z, [x0, z0.s, uxtw #1]
-  defm GLD1SH_S   : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
+  defm GLD1SH_S   : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh",   AArch64ld1s_gather_sxtw_scaled, AArch64ld1s_gather_uxtw_scaled, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
   defm GLDFF1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
   defm GLD1H_S    : sve_mem_32b_gld_sv_32_scaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
   defm GLDFF1H_S  : sve_mem_32b_gld_sv_32_scaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
@@ -442,11 +450,11 @@
 
   // Gathers using 32-bit pointers with scaled offset, e.g.
   // ld1h z0.s, p0/z, [z0.s, #16]
-  defm GLD1SB_S   : sve_mem_32b_gld_vi_32_ptrs<0b0000, "ld1sb",   imm0_31, null_frag, nxv4i8>;
+  defm GLD1SB_S   : sve_mem_32b_gld_vi_32_ptrs<0b0000, "ld1sb",   imm0_31, AArch64ld1s_gather_imm, nxv4i8>;
   defm GLDFF1SB_S : sve_mem_32b_gld_vi_32_ptrs<0b0001, "ldff1sb", imm0_31, null_frag, nxv4i8>;
   defm GLD1B_S    : sve_mem_32b_gld_vi_32_ptrs<0b0010, "ld1b",    imm0_31, AArch64ld1_gather_imm, nxv4i8>;
   defm GLDFF1B_S  : sve_mem_32b_gld_vi_32_ptrs<0b0011, "ldff1b",  imm0_31, null_frag, nxv4i8>;
-  defm GLD1SH_S   : sve_mem_32b_gld_vi_32_ptrs<0b0100, "ld1sh",   uimm5s2, null_frag, nxv4i16>;
+  defm GLD1SH_S   : sve_mem_32b_gld_vi_32_ptrs<0b0100, "ld1sh",   uimm5s2, AArch64ld1s_gather_imm, nxv4i16>;
   defm GLDFF1SH_S : sve_mem_32b_gld_vi_32_ptrs<0b0101, "ldff1sh", uimm5s2, null_frag, nxv4i16>;
   defm GLD1H_S    : sve_mem_32b_gld_vi_32_ptrs<0b0110, "ld1h",    uimm5s2, AArch64ld1_gather_imm, nxv4i16>;
   defm GLDFF1H_S  : sve_mem_32b_gld_vi_32_ptrs<0b0111, "ldff1h",  uimm5s2, null_frag, nxv4i16>;
@@ -455,45 +463,45 @@
 
   // Gathers using 64-bit pointers with scaled offset, e.g.
   // ld1h z0.d, p0/z, [z0.d, #16]
-  defm GLD1SB_D   : sve_mem_64b_gld_vi_64_ptrs<0b0000, "ld1sb",   imm0_31, null_frag, nxv2i8>;
-  defm GLDFF1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0001, "ldff1sb", imm0_31, null_frag, nxv2i8>;
-  defm GLD1B_D    : sve_mem_64b_gld_vi_64_ptrs<0b0010, "ld1b",    imm0_31, AArch64ld1_gather_imm, nxv2i8>;
-  defm GLDFF1B_D  : sve_mem_64b_gld_vi_64_ptrs<0b0011, "ldff1b",  imm0_31, null_frag, nxv2i8>;
-  defm GLD1SH_D   : sve_mem_64b_gld_vi_64_ptrs<0b0100, "ld1sh",   uimm5s2, null_frag, nxv2i16>;
-  defm GLDFF1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0101, "ldff1sh", uimm5s2, null_frag, nxv2i16>;
-  defm GLD1H_D    : sve_mem_64b_gld_vi_64_ptrs<0b0110, "ld1h",    uimm5s4, AArch64ld1_gather_imm, nxv2i16>;
-  defm GLDFF1H_D  : sve_mem_64b_gld_vi_64_ptrs<0b0111, "ldff1h",  uimm5s2, null_frag, nxv2i16>;
-  defm GLD1SW_D   : sve_mem_64b_gld_vi_64_ptrs<0b1000, "ld1sw",   uimm5s4, null_frag, nxv2i32>;
-  defm GLDFF1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1001, "ldff1sw", uimm5s4, null_frag, nxv2i32>;
-  defm GLD1W_D    : sve_mem_64b_gld_vi_64_ptrs<0b1010, "ld1w",    uimm5s4, AArch64ld1_gather_imm, nxv2i32>;
-  defm GLDFF1W_D  : sve_mem_64b_gld_vi_64_ptrs<0b1011, "ldff1w",  uimm5s4, null_frag, nxv2i32>;
-  defm GLD1D      : sve_mem_64b_gld_vi_64_ptrs<0b1110, "ld1d",    uimm5s8, AArch64ld1_gather_imm, nxv2i64>;
-  defm GLDFF1D    : sve_mem_64b_gld_vi_64_ptrs<0b1111, "ldff1d",  uimm5s8, null_frag, nxv2i64>;
+  defm GLD1SB_D   : sve_mem_64b_gld_vi_64_ptrs<0b0000, "ld1sb",   imm0_31, AArch64ld1s_gather_imm, nxv2i8>;
+  defm GLDFF1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0001, "ldff1sb", imm0_31, null_frag, nxv2i8>;
+  defm GLD1B_D    : sve_mem_64b_gld_vi_64_ptrs<0b0010, "ld1b",    imm0_31, AArch64ld1_gather_imm, nxv2i8>;
+  defm GLDFF1B_D  : sve_mem_64b_gld_vi_64_ptrs<0b0011, "ldff1b",  imm0_31, null_frag, nxv2i8>;
+  defm GLD1SH_D   : sve_mem_64b_gld_vi_64_ptrs<0b0100, "ld1sh",   uimm5s2, AArch64ld1s_gather_imm, nxv2i16>;
+  defm GLDFF1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0101, "ldff1sh", uimm5s2, null_frag, nxv2i16>;
+  defm GLD1H_D    : sve_mem_64b_gld_vi_64_ptrs<0b0110, "ld1h",    uimm5s4, AArch64ld1_gather_imm, nxv2i16>;
+  defm GLDFF1H_D  : sve_mem_64b_gld_vi_64_ptrs<0b0111, "ldff1h",  uimm5s2, null_frag, nxv2i16>;
+  defm GLD1SW_D   : sve_mem_64b_gld_vi_64_ptrs<0b1000, "ld1sw",   uimm5s4, AArch64ld1s_gather_imm, nxv2i32>;
+  defm GLDFF1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1001, "ldff1sw", uimm5s4, null_frag, nxv2i32>;
+  defm GLD1W_D    : sve_mem_64b_gld_vi_64_ptrs<0b1010, "ld1w",    uimm5s4, AArch64ld1_gather_imm, nxv2i32>;
+  defm GLDFF1W_D  : sve_mem_64b_gld_vi_64_ptrs<0b1011, "ldff1w",  uimm5s4, null_frag, nxv2i32>;
+  defm GLD1D      : sve_mem_64b_gld_vi_64_ptrs<0b1110, "ld1d",    uimm5s8, AArch64ld1_gather_imm, nxv2i64>;
+  defm GLDFF1D    : sve_mem_64b_gld_vi_64_ptrs<0b1111, "ldff1d",  uimm5s8, null_frag, nxv2i64>;
 
   // Gathers using unscaled 64-bit offsets, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d]
-  defm GLD1SB_D   : sve_mem_64b_gld_vs2_64_unscaled<0b0000, "ld1sb",   null_frag, nxv2i8>;
-  defm GLDFF1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0001, "ldff1sb", null_frag, nxv2i8>;
-  defm GLD1B_D    : sve_mem_64b_gld_vs2_64_unscaled<0b0010, "ld1b",    AArch64ld1_gather, nxv2i8>;
-  defm GLDFF1B_D  : sve_mem_64b_gld_vs2_64_unscaled<0b0011, "ldff1b",  null_frag, nxv2i8>;
-  defm GLD1SH_D   : sve_mem_64b_gld_vs2_64_unscaled<0b0100, "ld1sh",   null_frag, nxv2i16>;
-  defm GLDFF1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0101, "ldff1sh", null_frag, nxv2i16>;
-  defm GLD1H_D    : sve_mem_64b_gld_vs2_64_unscaled<0b0110, "ld1h",    AArch64ld1_gather, nxv2i16>;
-  defm GLDFF1H_D  : sve_mem_64b_gld_vs2_64_unscaled<0b0111, "ldff1h",  null_frag, nxv2i16>;
-  defm GLD1SW_D   : sve_mem_64b_gld_vs2_64_unscaled<0b1000, "ld1sw",   null_frag, nxv2i32>;
-  defm GLDFF1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1001, "ldff1sw", null_frag, nxv2i32>;
-  defm GLD1W_D    : sve_mem_64b_gld_vs2_64_unscaled<0b1010, "ld1w",    AArch64ld1_gather, nxv2i32>;
-  defm GLDFF1W_D  : sve_mem_64b_gld_vs2_64_unscaled<0b1011, "ldff1w",  null_frag, nxv2i32>;
-  defm GLD1D      : sve_mem_64b_gld_vs2_64_unscaled<0b1110, "ld1d",    AArch64ld1_gather, nxv2i64>;
-  defm GLDFF1D    : sve_mem_64b_gld_vs2_64_unscaled<0b1111, "ldff1d",  null_frag, nxv2i64>;
+  defm GLD1SB_D   : sve_mem_64b_gld_vs2_64_unscaled<0b0000, "ld1sb",   AArch64ld1s_gather, nxv2i8>;
+  defm GLDFF1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0001, "ldff1sb", null_frag, nxv2i8>;
+  defm GLD1B_D    : sve_mem_64b_gld_vs2_64_unscaled<0b0010, "ld1b",    AArch64ld1_gather, nxv2i8>;
+  defm GLDFF1B_D  : sve_mem_64b_gld_vs2_64_unscaled<0b0011, "ldff1b",  null_frag, nxv2i8>;
+  defm GLD1SH_D   : sve_mem_64b_gld_vs2_64_unscaled<0b0100, "ld1sh",   AArch64ld1s_gather, nxv2i16>;
+  defm GLDFF1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0101, "ldff1sh", null_frag, nxv2i16>;
+  defm GLD1H_D    : sve_mem_64b_gld_vs2_64_unscaled<0b0110, "ld1h",    AArch64ld1_gather, nxv2i16>;
+  defm GLDFF1H_D  : sve_mem_64b_gld_vs2_64_unscaled<0b0111, "ldff1h",  null_frag, nxv2i16>;
+  defm GLD1SW_D   : sve_mem_64b_gld_vs2_64_unscaled<0b1000, "ld1sw",   AArch64ld1s_gather, nxv2i32>;
+  defm GLDFF1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1001, "ldff1sw", null_frag, nxv2i32>;
+  defm GLD1W_D    : sve_mem_64b_gld_vs2_64_unscaled<0b1010, "ld1w",    AArch64ld1_gather, nxv2i32>;
+  defm GLDFF1W_D  : sve_mem_64b_gld_vs2_64_unscaled<0b1011, "ldff1w",  null_frag, nxv2i32>;
+  defm GLD1D      : sve_mem_64b_gld_vs2_64_unscaled<0b1110, "ld1d",    AArch64ld1_gather, nxv2i64>;
+  defm GLDFF1D    : sve_mem_64b_gld_vs2_64_unscaled<0b1111, "ldff1d",  null_frag, nxv2i64>;
 
   // Gathers using scaled 64-bit offsets, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, lsl #1]
-  defm GLD1SH_D   : sve_mem_64b_gld_sv2_64_scaled<0b0100, "ld1sh",   null_frag, ZPR64ExtLSL16, nxv2i16>;
+  defm GLD1SH_D   : sve_mem_64b_gld_sv2_64_scaled<0b0100, "ld1sh",   AArch64ld1s_gather_scaled, ZPR64ExtLSL16, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_sv2_64_scaled<0b0101, "ldff1sh", null_frag, ZPR64ExtLSL16, nxv2i16>;
   defm GLD1H_D    : sve_mem_64b_gld_sv2_64_scaled<0b0110, "ld1h",    AArch64ld1_gather_scaled, ZPR64ExtLSL16, nxv2i16>;
   defm GLDFF1H_D  : sve_mem_64b_gld_sv2_64_scaled<0b0111, "ldff1h",  null_frag, ZPR64ExtLSL16, nxv2i16>;
-  defm GLD1SW_D   : sve_mem_64b_gld_sv2_64_scaled<0b1000, "ld1sw",   null_frag, ZPR64ExtLSL32, nxv2i32>;
+  defm GLD1SW_D   : sve_mem_64b_gld_sv2_64_scaled<0b1000, "ld1sw",   AArch64ld1s_gather_scaled, ZPR64ExtLSL32, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_sv2_64_scaled<0b1001, "ldff1sw", null_frag, ZPR64ExtLSL32, nxv2i32>;
   defm GLD1W_D    : sve_mem_64b_gld_sv2_64_scaled<0b1010, "ld1w",    AArch64ld1_gather_scaled, ZPR64ExtLSL32, nxv2i32>;
   defm GLDFF1W_D  : sve_mem_64b_gld_sv2_64_scaled<0b1011, "ldff1w",  null_frag, ZPR64ExtLSL32, nxv2i32>;
@@ -502,15 +510,15 @@
 
   // Gathers using unscaled 32-bit offsets unpacked in 64-bits elements, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, uxtw]
-  defm GLD1SB_D   : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb",   null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
+  defm GLD1SB_D   : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb",   AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm GLD1B_D    : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm GLDFF1B_D  : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b",  null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
-  defm GLD1SH_D   : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
+  defm GLD1SH_D   : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh",   AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
   defm GLD1H_D    : sve_mem_64b_gld_vs_32_unscaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
   defm GLDFF1H_D  : sve_mem_64b_gld_vs_32_unscaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
-  defm GLD1SW_D   : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw",   null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
+  defm GLD1SW_D   : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw",   AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm GLD1W_D    : sve_mem_64b_gld_vs_32_unscaled<0b1010, "ld1w",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm GLDFF1W_D  : sve_mem_64b_gld_vs_32_unscaled<0b1011, "ldff1w",  null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
@@ -519,16 +527,16 @@
 
   // Gathers using scaled 32-bit offsets unpacked in 64-bits elements, e.g.
// ld1h z0.d, p0/z, [x0, z0.d, uxtw #1] - defm GLD1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; - defm GLDFF1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; - defm GLD1H_D : sve_mem_64b_gld_sv_32_scaled<0b0110, "ld1h", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; - defm GLDFF1H_D : sve_mem_64b_gld_sv_32_scaled<0b0111, "ldff1h", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; - defm GLD1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; - defm GLDFF1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; - defm GLD1W_D : sve_mem_64b_gld_sv_32_scaled<0b1010, "ld1w", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; - defm GLDFF1W_D : sve_mem_64b_gld_sv_32_scaled<0b1011, "ldff1w", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; - defm GLD1D : sve_mem_64b_gld_sv_32_scaled<0b1110, "ld1d", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>; - defm GLDFF1D : sve_mem_64b_gld_sv_32_scaled<0b1111, "ldff1d", null_frag, null_frag, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>; + defm GLD1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh", AArch64ld1s_gather_sxtw_scaled, AArch64ld1s_gather_uxtw_scaled, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; + defm GLDFF1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; + defm GLD1H_D : sve_mem_64b_gld_sv_32_scaled<0b0110, "ld1h", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; + defm GLDFF1H_D : sve_mem_64b_gld_sv_32_scaled<0b0111, "ldff1h", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>; + defm GLD1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw", AArch64ld1s_gather_sxtw_scaled, AArch64ld1s_gather_uxtw_scaled, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; + defm GLDFF1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; + defm GLD1W_D : sve_mem_64b_gld_sv_32_scaled<0b1010, "ld1w", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; + defm GLDFF1W_D : sve_mem_64b_gld_sv_32_scaled<0b1011, "ldff1w", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>; + defm GLD1D : sve_mem_64b_gld_sv_32_scaled<0b1110, "ld1d", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>; + defm GLDFF1D : sve_mem_64b_gld_sv_32_scaled<0b1111, "ldff1d", null_frag, null_frag, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>; // Non-temporal contiguous loads (register + immediate) defm LDNT1B_ZRI : sve_mem_cldnt_si<0b00, "ldnt1b", Z_b, ZPR8>; Index: llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll @@ -8,9 +8,6 @@ define @gld1h_index( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_index ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov 
z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.index.nxv2i16( %pg, i16* %base, @@ -22,9 +19,6 @@ define @gld1w_index( %pg, i32* %base, %b) { ; CHECK-LABEL: gld1w_index ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.index.nxv2i32( %pg, i32* %base, @@ -53,6 +47,33 @@ ret %load } +; +; LD1SH, LD1SW: base + 64-bit scaled offset +; e.g. ld1sh z0.d, p0/z, [x0, z0.d, lsl #1] +; + +define @gld1sh_index( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_index +; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.index.nxv2i16( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sw_index( %pg, i32* %base, %b) { +; CHECK-LABEL: gld1sw_index +; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.index.nxv2i32( %pg, + i32* %base, + %b) + %res = sext %load to + ret %res +} + declare @llvm.aarch64.sve.ld1.gather.index.nxv2i16(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.index.nxv2i32(, i32*, ) declare @llvm.aarch64.sve.ld1.gather.index.nxv2i64(, i64*, ) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll @@ -8,9 +8,6 @@ define @gld1b_d( %pg, i8* %base, %b) { ; CHECK-LABEL: gld1b_d: ; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.nxv2i8( %pg, i8* %base, @@ -22,9 +19,6 @@ define @gld1h_d( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_d: ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.nxv2i16( %pg, i16* %base, @@ -36,9 +30,6 @@ define @gld1w_d( %pg, i32* %base, %offsets) { ; CHECK-LABEL: gld1w_d: ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.nxv2i32( %pg, i32* %base, @@ -65,6 +56,43 @@ double* %base, %b) ret %load + +; +; LD1SB, LD1SW, LD1SH: base + 64-bit unscaled offset +; e.g. 
ld1sh { z0.d }, p0/z, [x0, z0.d] +; + +define @gld1sb_d( %pg, i8* %base, %b) { +; CHECK-LABEL: gld1sb_d: +; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.nxv2i8( %pg, + i8* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_d( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_d: +; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.nxv2i16( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sw_d( %pg, i32* %base, %offsets) { +; CHECK-LABEL: gld1sw_d: +; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.nxv2i32( %pg, + i32* %base, + %offsets) + %res = sext %load to + ret %res } declare @llvm.aarch64.sve.ld1.gather.nxv2i8(, i8*, ) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-scaled-32bit-offsets.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-scaled-32bit-offsets.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-scaled-32bit-offsets.ll @@ -10,9 +10,6 @@ define @gld1h_s_uxtw_index( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_s_uxtw_index: ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32( %pg, i16* %base, @@ -24,9 +21,6 @@ define @gld1h_s_sxtw_index( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_s_sxtw_index: ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32( %pg, i16* %base, @@ -38,9 +32,6 @@ define @gld1h_d_uxtw_index( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_d_uxtw_index: ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64( %pg, i16* %base, @@ -52,9 +43,6 @@ define @gld1h_d_sxtw_index( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_d_sxtw_index: ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64( %pg, i16* %base, @@ -87,9 +75,6 @@ define @gld1w_d_uxtw_index( %pg, i32* %base, %b) { ; CHECK-LABEL: gld1w_d_uxtw_index: ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64( %pg, i32* %base, @@ -101,9 +86,6 @@ define @gld1w_d_sxtw_index( %pg, i32* %base, %b) { ; CHECK-LABEL: gld1w_d_sxtw_index: ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64( %pg, i32* %base, @@ -173,14 +155,89 @@ ret %load } -; LD1H +; +; LD1SH, LD1SW, LD1SD: base + 32-bit scaled offset, sign (sxtw) or zero (uxtw) +; extended to 64 bits +; e.g. 
ld1sh z0.d, p0/z, [x0, z0.d, uxtw #1] +; + +; LD1SH +define @gld1sh_s_uxtw_index( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_s_uxtw_index: +; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_s_sxtw_index( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_s_sxtw_index: +; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_d_uxtw_index( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_d_uxtw_index: +; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_d_sxtw_index( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_d_sxtw_index: +; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +; LD1SW +define @gld1sw_d_uxtw_index( %pg, i32* %base, %b) { +; CHECK-LABEL: gld1sw_d_uxtw_index: +; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64( %pg, + i32* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sw_d_sxtw_index( %pg, i32* %base, %b) { +; CHECK-LABEL: gld1sw_d_sxtw_index: +; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64( %pg, + i32* %base, + %b) + %res = sext %load to + ret %res +} + + +; LD1H/LD1SH declare @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(, i16*, ) -; LD1W +; LD1W/LD1SW declare @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32.nxv4i32(, i32*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32.nxv4i32(, i32*, ) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-unscaled-32bit-offsets.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-unscaled-32bit-offsets.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-unscaled-32bit-offsets.ll @@ -10,9 +10,6 @@ define @gld1b_s_uxtw( %pg, i8* %base, %b) { ; CHECK-LABEL: gld1b_s_uxtw: ; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32( %pg, i8* %base, @@ -24,9 +21,6 @@ define @gld1b_s_sxtw( %pg, i8* %base, %b) { ; CHECK-LABEL: gld1b_s_sxtw: ; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32( %pg, i8* %base, @@ -38,9 +32,6 @@ define @gld1b_d_uxtw( %pg, i8* %base, %b) { ; CHECK-LABEL: gld1b_d_uxtw: ; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret 
%load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64( %pg, i8* %base, @@ -52,9 +43,6 @@ define @gld1b_d_sxtw( %pg, i8* %base, %b) { ; CHECK-LABEL: gld1b_d_sxtw: ; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64( %pg, i8* %base, @@ -67,9 +55,6 @@ define @gld1h_s_uxtw( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_s_uxtw: ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32( %pg, i16* %base, @@ -81,9 +66,6 @@ define @gld1h_s_sxtw( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_s_sxtw: ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32( %pg, i16* %base, @@ -95,9 +77,6 @@ define @gld1h_d_uxtw( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_d_uxtw: ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64( %pg, i16* %base, @@ -109,9 +88,6 @@ define @gld1h_d_sxtw( %pg, i16* %base, %b) { ; CHECK-LABEL: gld1h_d_sxtw: ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64( %pg, i16* %base, @@ -144,9 +120,6 @@ define @gld1w_d_uxtw( %pg, i32* %base, %b) { ; CHECK-LABEL: gld1w_d_uxtw: ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64( %pg, i32* %base, @@ -158,9 +131,6 @@ define @gld1w_d_sxtw( %pg, i32* %base, %b) { ; CHECK-LABEL: gld1w_d_sxtw: ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64( %pg, i32* %base, @@ -230,19 +200,138 @@ ret %load } -; LD1B +; +; LD1SB, LD1SW, LD1SH: base + 32-bit unscaled offset, sign (sxtw) or zero +; (uxtw) extended to 64 bits. +; e.g. 
ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw] +; + +; LD1SB +define @gld1sb_s_uxtw( %pg, i8* %base, %b) { +; CHECK-LABEL: gld1sb_s_uxtw: +; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32( %pg, + i8* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sb_s_sxtw( %pg, i8* %base, %b) { +; CHECK-LABEL: gld1sb_s_sxtw: +; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32( %pg, + i8* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sb_d_uxtw( %pg, i8* %base, %b) { +; CHECK-LABEL: gld1sb_d_uxtw: +; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64( %pg, + i8* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sb_d_sxtw( %pg, i8* %base, %b) { +; CHECK-LABEL: gld1sb_d_sxtw: +; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64( %pg, + i8* %base, + %b) + %res = sext %load to + ret %res +} + +; LD1SH +define @gld1sh_s_uxtw( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_s_uxtw: +; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_s_sxtw( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_s_sxtw: +; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_d_uxtw( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_d_uxtw: +; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sh_d_sxtw( %pg, i16* %base, %b) { +; CHECK-LABEL: gld1sh_d_sxtw: +; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64( %pg, + i16* %base, + %b) + %res = sext %load to + ret %res +} + +; LD1SW +define @gld1sw_d_uxtw( %pg, i32* %base, %b) { +; CHECK-LABEL: gld1sw_d_uxtw: +; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64( %pg, + i32* %base, + %b) + %res = sext %load to + ret %res +} + +define @gld1sw_d_sxtw( %pg, i32* %base, %b) { +; CHECK-LABEL: gld1sw_d_sxtw: +; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64( %pg, + i32* %base, + %b) + %res = sext %load to + ret %res +} + +; LD1B/LD1SB declare @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(, i8*, ) declare @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(, i8*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(, i8*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(, i8*, ) -; LD1H +; LD1H/LD1SH declare @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(, i16*, ) declare @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(, i16*, ) -; LD1W +; LD1W/LD1SW declare @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(, i32*, ) declare @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(, i32*, ) 
declare @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(, i32*, ) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-vector-base.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-vector-base.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-vector-base.ll @@ -9,9 +9,6 @@ define @gld1b_s_imm( %pg, %base) { ; CHECK-LABEL: gld1b_s_imm: ; CHECK: ld1b { z0.s }, p0/z, [z0.s, #16] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32( %pg, %base, @@ -23,9 +20,6 @@ define @gld1b_d_imm( %pg, %base) { ; CHECK-LABEL: gld1b_d_imm: ; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16] -; CHECK-NEXT: mov w8, #255 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64( %pg, %base, @@ -38,9 +32,6 @@ define @gld1h_s_imm( %pg, %base) { ; CHECK-LABEL: gld1h_s_imm: ; CHECK: ld1h { z0.s }, p0/z, [z0.s, #16] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.s, w8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32( %pg, %base, @@ -52,9 +43,6 @@ define @gld1h_d_imm( %pg, %base) { ; CHECK-LABEL: gld1h_d_imm: ; CHECK: ld1h { z0.d }, p0/z, [z0.d, #16] -; CHECK-NEXT: mov w8, #65535 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64( %pg, %base, @@ -77,9 +65,6 @@ define @gld1w_d_imm( %pg, %base) { ; CHECK-LABEL: gld1w_d_imm: ; CHECK: ld1w { z0.d }, p0/z, [z0.d, #16] -; CHECK-NEXT: mov w8, #-1 -; CHECK-NEXT: mov z1.d, x8 -; CHECK-NEXT: and z0.d, z0.d, z1.d ; CHECK-NEXT: ret %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64( %pg, %base, @@ -119,15 +104,77 @@ ret %load } -; LD1B +; LD1SB, LD1SW, LD1SH: vector + immediate (index) +; e.g. 
ld1sh { z0.s }, p0/z, [z0.s, #16] +; + +; LD1SB +define @gld1sb_s_imm( %pg, %base) { +; CHECK-LABEL: gld1sb_s_imm: +; CHECK: ld1sb { z0.s }, p0/z, [z0.s, #16] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32( %pg, + %base, + i64 16) + %res = sext %load to + ret %res +} + +define @gld1sb_d_imm( %pg, %base) { +; CHECK-LABEL: gld1sb_d_imm: +; CHECK: ld1sb { z0.d }, p0/z, [z0.d, #16] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64( %pg, + %base, + i64 16) + %res = sext %load to + ret %res +} + +; LD1SH +define @gld1sh_s_imm( %pg, %base) { +; CHECK-LABEL: gld1sh_s_imm: +; CHECK: ld1sh { z0.s }, p0/z, [z0.s, #16] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32( %pg, + %base, + i64 16) + %res = sext %load to + ret %res +} + +define @gld1sh_d_imm( %pg, %base) { +; CHECK-LABEL: gld1sh_d_imm: +; CHECK: ld1sh { z0.d }, p0/z, [z0.d, #16] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64( %pg, + %base, + i64 16) + %res = sext %load to + ret %res +} + +; LD1SW +define @gld1sw_d_imm( %pg, %base) { +; CHECK-LABEL: gld1sw_d_imm: +; CHECK: ld1sw { z0.d }, p0/z, [z0.d, #16] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64( %pg, + %base, + i64 16) + %res = sext %load to + ret %res +} + +; LD1B/LD1SB declare @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(, , i64) declare @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(, , i64) -; LD1H +; LD1H/LD1SH declare @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(, , i64) declare @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(, , i64) -; LD1W +; LD1W/LD1SW declare @llvm.aarch64.sve.ld1.gather.imm.nxv4i32.nxv4i32(, , i64) declare @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(, , i64)
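For reference, an end-to-end example of the vector-plus-immediate form with the scalable-vector types spelled out explicitly. This is a sketch based on the gld1sh_d_imm test above; the function name is illustrative, and the intrinsic signature follows the declarations in this file:

define <vscale x 2 x i64> @gld1sh_d_imm_example(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; expected to compile to: ld1sh { z0.d }, p0/z, [z0.d, #16]
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                   <vscale x 2 x i64> %base,
                                                                                   i64 16)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}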