Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -775,6 +775,12 @@
                  LLVMPointerTo<0>],
                 [IntrReadMem, IntrArgMemOnly]>;
 
+  class AdvSIMD_1Vec_PredFaultingLoad_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMPointerToElt<0>],
+                [IntrReadMem, IntrArgMemOnly]>;
+
   class AdvSIMD_1Vec_PredStore_Intrinsic
     : Intrinsic<[],
                 [llvm_anyvector_ty,
@@ -1169,6 +1175,8 @@
 
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 
+def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
+
 //
 // Stores
 //
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -215,6 +215,9 @@
   PTEST,
   PTRUE,
 
+  LDNF1,
+  LDNF1S,
+
   // Unsigned gather loads.
   GLD1,
   GLD1_SCALED,
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1359,6 +1359,8 @@
   case AArch64ISD::INSR:              return "AArch64ISD::INSR";
   case AArch64ISD::PTEST:             return "AArch64ISD::PTEST";
   case AArch64ISD::PTRUE:             return "AArch64ISD::PTRUE";
+  case AArch64ISD::LDNF1:             return "AArch64ISD::LDNF1";
+  case AArch64ISD::LDNF1S:            return "AArch64ISD::LDNF1S";
   case AArch64ISD::GLD1:              return "AArch64ISD::GLD1";
   case AArch64ISD::GLD1_SCALED:       return "AArch64ISD::GLD1_SCALED";
   case AArch64ISD::GLD1_SXTW:         return "AArch64ISD::GLD1_SXTW";
@@ -10173,9 +10175,14 @@
   if (!Src.hasOneUse())
     return SDValue();
 
-  // GLD1* instructions perform an implicit zero-extend, which makes them
+  EVT MemVT;
+
+  // SVE load instructions perform an implicit zero-extend, which makes them
   // perfect candidates for combining.
   switch (Src->getOpcode()) {
+  case AArch64ISD::LDNF1:
+    MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
+    break;
   case AArch64ISD::GLD1:
   case AArch64ISD::GLD1_SCALED:
   case AArch64ISD::GLD1_SXTW:
@@ -10183,13 +10190,12 @@
   case AArch64ISD::GLD1_UXTW:
   case AArch64ISD::GLD1_UXTW_SCALED:
   case AArch64ISD::GLD1_IMM:
+    MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
     break;
   default:
     return SDValue();
   }
 
-  EVT MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
-
   if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
     return Src;
 
@@ -11165,6 +11171,35 @@
   return NewST1;
 }
 
+// Returns an SVE type that ContentTy can be trivially sign or zero extended
+// into.
+static MVT getSVEContainerType(EVT ContentTy) {
+  assert(ContentTy.isSimple() && "No SVE containers for extended types");
+
+  switch (ContentTy.getSimpleVT().SimpleTy) {
+  default:
+    llvm_unreachable("No known SVE container for this MVT type");
+  case MVT::nxv2i8:
+  case MVT::nxv2i16:
+  case MVT::nxv2i32:
+  case MVT::nxv2i64:
+  case MVT::nxv2f32:
+  case MVT::nxv2f64:
+    return MVT::nxv2i64;
+  case MVT::nxv4i8:
+  case MVT::nxv4i16:
+  case MVT::nxv4i32:
+  case MVT::nxv4f32:
+    return MVT::nxv4i32;
+  case MVT::nxv8i8:
+  case MVT::nxv8i16:
+  case MVT::nxv8f16:
+    return MVT::nxv8i16;
+  case MVT::nxv16i8:
+    return MVT::nxv16i8;
+  }
+}
+
 static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
@@ -11207,6 +11242,32 @@
                       ISD::UNINDEXED, false, false);
 }
 
+static SDValue performLDNF1Combine(SDNode *N, SelectionDAG &DAG) {
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+
+  if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
+    return SDValue();
+
+  EVT ContainerVT = VT;
+  if (ContainerVT.isInteger())
+    ContainerVT = getSVEContainerType(ContainerVT);
+
+  SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
+  SDValue Ops[] = { N->getOperand(0), // Chain
+                    N->getOperand(2), // Pg
+                    N->getOperand(3), // Base
+                    DAG.getValueType(VT) };
+
+  SDValue Load = DAG.getNode(AArch64ISD::LDNF1, DL, VTs, Ops);
+  SDValue LoadChain = SDValue(Load.getNode(), 1);
+
+  if (ContainerVT.isInteger() && (VT != ContainerVT))
+    Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
+
+  return DAG.getMergeValues({ Load, LoadChain }, DL);
+}
+
 /// Replace a splat of zeros to a vector store by scalar stores of WZR/XZR. The
 /// load store optimizer pass will merge them to store pair stores. This should
 /// be better than a movi to create the vector zero followed by a vector store
@@ -12258,29 +12319,6 @@
                      DAG.getConstant(MinOffset, DL, MVT::i64));
 }
 
-// Returns an SVE type that ContentTy can be trivially sign or zero extended
-// into.
-static MVT getSVEContainerType(EVT ContentTy) {
-  assert(ContentTy.isSimple() && "No SVE containers for extended types");
-
-  switch (ContentTy.getSimpleVT().SimpleTy) {
-  default:
-    llvm_unreachable("No known SVE container for this MVT type");
-  case MVT::nxv2i8:
-  case MVT::nxv2i16:
-  case MVT::nxv2i32:
-  case MVT::nxv2i64:
-  case MVT::nxv2f32:
-  case MVT::nxv2f64:
-    return MVT::nxv2i64;
-  case MVT::nxv4i8:
-  case MVT::nxv4i16:
-  case MVT::nxv4i32:
-  case MVT::nxv4f32:
-    return MVT::nxv4i32;
-  }
-}
-
 static SDValue performST1ScatterCombine(SDNode *N, SelectionDAG &DAG,
                                         unsigned Opcode,
                                         bool OnlyPackedOffsets = true) {
@@ -12419,10 +12457,15 @@
   SDValue Src = N->getOperand(0);
   unsigned Opc = Src->getOpcode();
 
-  // Gather load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
+  // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
   // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
   unsigned NewOpc;
+  unsigned MemVTOpNum = 4;
   switch (Opc) {
+  case AArch64ISD::LDNF1:
+    NewOpc = AArch64ISD::LDNF1S;
+    MemVTOpNum = 3;
+    break;
   case AArch64ISD::GLD1:
     NewOpc = AArch64ISD::GLD1S;
     break;
@@ -12449,15 +12492,17 @@
   }
 
   EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
-  EVT GLD1SrcMemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
+  EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT();
 
-  if ((SignExtSrcVT != GLD1SrcMemVT) || !Src.hasOneUse())
+  if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse())
     return SDValue();
 
   EVT DstVT = N->getValueType(0);
   SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
-  SDValue Ops[] = {Src->getOperand(0), Src->getOperand(1), Src->getOperand(2),
-                   Src->getOperand(3), Src->getOperand(4)};
+
+  SmallVector<SDValue, 5> Ops;
+  for (unsigned I = 0; I < Src->getNumOperands(); ++I)
+    Ops.push_back(Src->getOperand(I));
 
   SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
   DCI.CombineTo(N, ExtLoad);
@@ -12555,6 +12600,8 @@
     return performNEONPostLDSTCombine(N, DCI, DAG);
   case Intrinsic::aarch64_sve_ldnt1:
     return performLDNT1Combine(N, DAG);
+  case Intrinsic::aarch64_sve_ldnf1:
+    return performLDNF1Combine(N, DAG);
   case Intrinsic::aarch64_sve_stnt1:
     return performSTNT1Combine(N, DAG);
   case Intrinsic::aarch64_sve_ld1_gather:
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -547,6 +547,13 @@
 
 def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
 
+def SDT_AArch64_LDNF1 : SDTypeProfile<1, 3, [
+  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>,
+  SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
+]>;
+
+def AArch64ldnf1 : SDNode<"AArch64ISD::LDNF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+
 //===----------------------------------------------------------------------===//
 
 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -46,6 +46,7 @@
 def AArch64ld1_gather_sxtw_scaled : SDNode<"AArch64ISD::GLD1_SXTW_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1_gather_imm         : SDNode<"AArch64ISD::GLD1_IMM",         SDT_AArch64_GLD1_IMM, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 
+def AArch64ldnf1                  : SDNode<"AArch64ISD::LDNF1S",       SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather            : SDNode<"AArch64ISD::GLD1S",        SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather_scaled     : SDNode<"AArch64ISD::GLD1S_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather_uxtw       : SDNode<"AArch64ISD::GLD1S_UXTW",   SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
@@ -1231,6 +1232,40 @@
   defm : pred_store;
   defm : pred_store;
   defm : pred_store;
+
+  multiclass ldnf1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
+    // base
+    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
+              (I PPR:$gp, GPR64sp:$base, (i64 0))>;
+  }
+
+  // 2-element contiguous non-faulting loads
+  defm : ldnf1<LDNF1B_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i8>;
+  defm : ldnf1<LDNF1SB_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i8>;
+  defm : ldnf1<LDNF1H_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i16>;
+  defm : ldnf1<LDNF1SH_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i16>;
+  defm : ldnf1<LDNF1W_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i32>;
+  defm : ldnf1<LDNF1SW_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i32>;
+  defm : ldnf1<LDNF1D_IMM,    nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i64>;
+  defm : ldnf1<LDNF1D_IMM,    nxv2f64, AArch64ldnf1,  nxv2i1, nxv2f64>;
+
+  // 4-element contiguous non-faulting loads
+  defm : ldnf1<LDNF1B_S_IMM,  nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i8>;
+  defm : ldnf1<LDNF1SB_S_IMM, nxv4i32, AArch64ldnf1s, nxv4i1, nxv4i8>;
+  defm : ldnf1<LDNF1H_S_IMM,  nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i16>;
+  defm : ldnf1<LDNF1SH_S_IMM, nxv4i32, AArch64ldnf1s, nxv4i1, nxv4i16>;
+  defm : ldnf1<LDNF1W_IMM,    nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i32>;
+  defm : ldnf1<LDNF1W_IMM,    nxv4f32, AArch64ldnf1,  nxv4i1, nxv4f32>;
+
+  // 8-element contiguous non-faulting loads
+  defm : ldnf1<LDNF1B_H_IMM,  nxv8i16, AArch64ldnf1,  nxv8i1, nxv8i8>;
+  defm : ldnf1<LDNF1SB_H_IMM, nxv8i16, AArch64ldnf1s, nxv8i1, nxv8i8>;
+  defm : ldnf1<LDNF1H_IMM,    nxv8i16, AArch64ldnf1,  nxv8i1, nxv8i16>;
+  defm : ldnf1<LDNF1H_IMM,    nxv8f16, AArch64ldnf1,  nxv8i1, nxv8f16>;
+
+  // 16-element contiguous non-faulting loads
+  defm : ldnf1<LDNF1B_IMM,    nxv16i8, AArch64ldnf1,  nxv16i1, nxv16i8>;
+
 }
 
 let Predicates = [HasSVE2] in {
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5557,14 +5557,21 @@
 
 multiclass sve_mem_cld_si_base<bits<4> dtype, bit nf, string asm,
                                RegisterOperand listty, ZPRRegOp zprty> {
-  def "" : sve_mem_cld_si_base<dtype, nf, asm, listty>;
+  def _REAL : sve_mem_cld_si_base<dtype, nf, asm, listty>;
 
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
-                  (!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
+                  (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $imm4, mul vl]",
-                  (!cast<Instruction>(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>;
+                  (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
-                  (!cast<Instruction>(NAME) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
+                  (!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>;
+
+  // We need a layer of indirection because early machine code passes balk at
+  // physical register (i.e. FFR) uses that have no previous definition.
+  let hasSideEffects = 1, hasNoSchedulingInfo = 1, mayLoad = 1 in {
+  def "" : Pseudo<(outs listty:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), []>,
+           PseudoInstExpansion<(!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4)>;
+  }
 }
 
 multiclass sve_mem_cld_si<bits<4> dtype, string asm, RegisterOperand listty,
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -0,0 +1,182 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b:
+; CHECK: ldnf1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_h:
+; CHECK: ldnf1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1sb_h:
+; CHECK: ldnf1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1h:
+; CHECK: ldnf1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, i16* %a)
+  ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, half* %a) {
+; CHECK-LABEL: ldnf1h_f16:
+; CHECK: ldnf1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, half* %a)
+  ret <vscale x 8 x half> %load
+}
+
+define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_s:
+; CHECK: ldnf1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1sb_s:
+; CHECK: ldnf1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1h_s:
+; CHECK: ldnf1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1sh_s:
+; CHECK: ldnf1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldnf1w:
+; CHECK: ldnf1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, i32* %a)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, float* %a) {
+; CHECK-LABEL: ldnf1w_f32:
+; CHECK: ldnf1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, float* %a)
+  ret <vscale x 4 x float> %load
+}
+
+define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_d:
+; CHECK: ldnf1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1sb_d:
+; CHECK: ldnf1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1h_d:
+; CHECK: ldnf1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1sh_d:
+; CHECK: ldnf1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldnf1w_d:
+; CHECK: ldnf1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldnf1sw_d:
+; CHECK: ldnf1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, i64* %a) {
+; CHECK-LABEL: ldnf1d:
+; CHECK: ldnf1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, i64* %a)
+  ret <vscale x 2 x i64> %load
+}
+
+define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, double* %a) {
+; CHECK-LABEL: ldnf1d_f64:
+; CHECK: ldnf1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, double* %a)
+  ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1>, i8*)
+
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1>, half*)
+
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1>, i8*)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1>, float*)
+
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1>, i8*)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1>, i16*)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1>, double*)
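
Note on intended usage (not part of the patch above): a non-faulting load takes no fault; lanes that would have faulted are left unloaded and the outcome is recorded in the first-fault register (FFR), so a caller is expected to read the FFR to learn which lanes hold valid data. The sketch below is illustrative only and assumes companion FFR intrinsics (@llvm.aarch64.sve.setffr and @llvm.aarch64.sve.rdffr.z) that are not added by this patch; the function name and the select-based masking are hypothetical.

; Set the FFR to all-true, do the speculative load, then keep only the lanes
; the hardware reports as successfully loaded.
define <vscale x 16 x i8> @speculative_load(<vscale x 16 x i1> %pg, i8* %a) {
  call void @llvm.aarch64.sve.setffr()
  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
  ; %ffr is true only for active lanes that did not fault.
  %ffr = call <vscale x 16 x i1> @llvm.aarch64.sve.rdffr.z(<vscale x 16 x i1> %pg)
  %safe = select <vscale x 16 x i1> %ffr, <vscale x 16 x i8> %load, <vscale x 16 x i8> zeroinitializer
  ret <vscale x 16 x i8> %safe
}

declare void @llvm.aarch64.sve.setffr()
declare <vscale x 16 x i1> @llvm.aarch64.sve.rdffr.z(<vscale x 16 x i1>)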