Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1176,6 +1176,7 @@

 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
+def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;

 //
 // Stores
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -217,6 +217,8 @@

   LDNF1,
   LDNF1S,
+  LDFF1,
+  LDFF1S,

   // Unsigned gather loads.
   GLD1,
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1361,6 +1361,8 @@
   case AArch64ISD::PTRUE: return "AArch64ISD::PTRUE";
   case AArch64ISD::LDNF1: return "AArch64ISD::LDNF1";
   case AArch64ISD::LDNF1S: return "AArch64ISD::LDNF1S";
+  case AArch64ISD::LDFF1: return "AArch64ISD::LDFF1";
+  case AArch64ISD::LDFF1S: return "AArch64ISD::LDFF1S";
   case AArch64ISD::GLD1: return "AArch64ISD::GLD1";
   case AArch64ISD::GLD1_SCALED: return "AArch64ISD::GLD1_SCALED";
   case AArch64ISD::GLD1_SXTW: return "AArch64ISD::GLD1_SXTW";
@@ -10181,6 +10183,7 @@
   // perfect candidates for combining.
   switch (Src->getOpcode()) {
   case AArch64ISD::LDNF1:
+  case AArch64ISD::LDFF1:
     MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
     break;
   case AArch64ISD::GLD1:
@@ -11242,7 +11245,7 @@
                      ISD::UNINDEXED, false, false);
 }

-static SDValue performLDNF1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performLDNF1Combine(SDNode *N, SelectionDAG &DAG, bool isFF) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);

@@ -11259,7 +11262,8 @@
                    N->getOperand(3), // Base
                    DAG.getValueType(VT) };

-  SDValue Load = DAG.getNode(AArch64ISD::LDNF1, DL, VTs, Ops);
+  unsigned Opc = isFF ? AArch64ISD::LDFF1 : AArch64ISD::LDNF1;
+  SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
   SDValue LoadChain = SDValue(Load.getNode(), 1);

   if (ContainerVT.isInteger() && (VT != ContainerVT))
@@ -12464,6 +12468,9 @@
   case AArch64ISD::LDNF1:
     NewOpc = AArch64ISD::LDNF1S;
     break;
+  case AArch64ISD::LDFF1:
+    NewOpc = AArch64ISD::LDFF1S;
+    break;
   case AArch64ISD::GLD1:
     NewOpc = AArch64ISD::GLD1S;
     break;
@@ -12491,7 +12498,7 @@

   EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
-  unsigned OpNum = NewOpc == AArch64ISD::LDNF1S ? 3 : 4;
+  unsigned OpNum = (NewOpc == AArch64ISD::LDNF1S || NewOpc == AArch64ISD::LDFF1S) ? 3 : 4;
   EVT LD1SrcMemVT = cast<VTSDNode>(Src->getOperand(OpNum))->getVT();

   if ((SignExtSrcVT != LD1SrcMemVT) || !Src.hasOneUse())
@@ -12505,7 +12512,7 @@

   // For gather loads there is an extra argument for the
   // offset, so we'll need to add an extra arg to Ops here
-  if (NewOpc != AArch64ISD::LDNF1S)
+  if (NewOpc != AArch64ISD::LDNF1S && NewOpc != AArch64ISD::LDFF1S)
     Ops.push_back(Src->getOperand(4));

   SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
@@ -12605,7 +12612,9 @@
     case Intrinsic::aarch64_sve_ldnt1:
       return performLDNT1Combine(N, DAG);
     case Intrinsic::aarch64_sve_ldnf1:
-      return performLDNF1Combine(N, DAG);
+      return performLDNF1Combine(N, DAG, false);
+    case Intrinsic::aarch64_sve_ldff1:
+      return performLDNF1Combine(N, DAG, true);
     case Intrinsic::aarch64_sve_stnt1:
       return performSTNT1Combine(N, DAG);
     case Intrinsic::aarch64_sve_ld1_gather:
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -553,6 +553,7 @@
 ]>;

 def AArch64ldnf1 : SDNode<"AArch64ISD::LDNF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ldff1 : SDNode<"AArch64ISD::LDFF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;

 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -47,6 +47,7 @@
 def AArch64ld1_gather_imm : SDNode<"AArch64ISD::GLD1_IMM", SDT_AArch64_GLD1_IMM, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;

 def AArch64ldnf1s : SDNode<"AArch64ISD::LDNF1S", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ldff1s : SDNode<"AArch64ISD::LDFF1S", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather : SDNode<"AArch64ISD::GLD1S", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather_scaled : SDNode<"AArch64ISD::GLD1S_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather_uxtw : SDNode<"AArch64ISD::GLD1S_UXTW", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
@@ -1266,6 +1267,40 @@
   // 16-element contiguous non-faulting loads
   defm : ldnf1<LDNF1B_IMM, nxv16i8, AArch64ldnf1, nxv16i1, nxv16i8>;

+  multiclass ldff1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
+    // Add more complex addressing modes here as required.
+    // Base
+    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
+              (I PPR:$gp, GPR64sp:$base, XZR)>;
+  }
+
+  // 2-element contiguous first faulting loads
+  defm : ldff1<LDFF1B_D,  nxv2i64, AArch64ldff1,  nxv2i1, nxv2i8>;
+  defm : ldff1<LDFF1SB_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i8>;
+  defm : ldff1<LDFF1H_D,  nxv2i64, AArch64ldff1,  nxv2i1, nxv2i16>;
+  defm : ldff1<LDFF1SH_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i16>;
+  defm : ldff1<LDFF1W_D,  nxv2i64, AArch64ldff1,  nxv2i1, nxv2i32>;
+  defm : ldff1<LDFF1SW_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i32>;
+  defm : ldff1<LDFF1D,    nxv2i64, AArch64ldff1,  nxv2i1, nxv2i64>;
+  defm : ldff1<LDFF1W_D,  nxv2f32, AArch64ldff1,  nxv2i1, nxv2f32>;
+  defm : ldff1<LDFF1D,    nxv2f64, AArch64ldff1,  nxv2i1, nxv2f64>;
+
+  // 4-element contiguous first faulting loads
+  defm : ldff1<LDFF1B_S,  nxv4i32, AArch64ldff1,  nxv4i1, nxv4i8>;
+  defm : ldff1<LDFF1SB_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i8>;
+  defm : ldff1<LDFF1H_S,  nxv4i32, AArch64ldff1,  nxv4i1, nxv4i16>;
+  defm : ldff1<LDFF1SH_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i16>;
+  defm : ldff1<LDFF1W,    nxv4i32, AArch64ldff1,  nxv4i1, nxv4i32>;
+  defm : ldff1<LDFF1W,    nxv4f32, AArch64ldff1,  nxv4i1, nxv4f32>;
+
+  // 8-element contiguous first faulting loads
+  defm : ldff1<LDFF1B_H,  nxv8i16, AArch64ldff1,  nxv8i1, nxv8i8>;
+  defm : ldff1<LDFF1SB_H, nxv8i16, AArch64ldff1s, nxv8i1, nxv8i8>;
+  defm : ldff1<LDFF1H,    nxv8i16, AArch64ldff1,  nxv8i1, nxv8i16>;
+  defm : ldff1<LDFF1H,    nxv8f16, AArch64ldff1,  nxv8i1, nxv8f16>;
+
+  // 16-element contiguous first faulting loads
+  defm : ldff1<LDFF1B,    nxv16i8, AArch64ldff1,  nxv16i1, nxv16i8>;
 }

 let Predicates = [HasSVE2] in {
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5780,6 +5780,13 @@
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
                   (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, XZR), 0>;
+
+  // We need a layer of indirection because early machine code passes balk at
+  // physical register (i.e. FFR) uses that have no previous definition.
+  let hasSideEffects = 1, hasNoSchedulingInfo = 1 in {
+  def "" : Pseudo<(outs listty:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, gprty:$Rm), []>,
+           PseudoInstExpansion<(!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, gprty:$Rm)>;
+  }
 }

 multiclass sve_mem_cldnf_si<bits<4> dtype, string asm, RegisterOperand listty,
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
@@ -0,0 +1,220 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; LDFF1B
+;
+
+define <vscale x 16 x i8> @ldff1b(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b:
+; CHECK: ldff1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 8 x i16> @ldff1b_h(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b_h:
+; CHECK: ldff1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @ldff1b_s(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b_s:
+; CHECK: ldff1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1b_d(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b_d:
+; CHECK: ldff1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1SB
+;
+
+define <vscale x 8 x i16> @ldff1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1sb_h:
+; CHECK: ldff1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @ldff1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1sb_s:
+; CHECK: ldff1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1sb_d:
+; CHECK: ldff1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1H
+;
+
+define <vscale x 8 x i16> @ldff1h(<vscale x 8 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1h:
+; CHECK: ldff1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, i16* %a)
+  ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 4 x i32> @ldff1h_s(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1h_s:
+; CHECK: ldff1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1h_d(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1h_d:
+; CHECK: ldff1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x half> @ldff1h_f16(<vscale x 8 x i1> %pg, half* %a) {
+; CHECK-LABEL: ldff1h_f16:
+; CHECK: ldff1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, half* %a)
+  ret <vscale x 8 x half> %load
+}
+
+;
+; LDFF1SH
+;
+
+define <vscale x 4 x i32> @ldff1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1sh_s:
+; CHECK: ldff1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1sh_d:
+; CHECK: ldff1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1W
+;
+
+define <vscale x 4 x i32> @ldff1w(<vscale x 4 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldff1w:
+; CHECK: ldff1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, i32* %a)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 2 x i64> @ldff1w_d(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldff1w_d:
+; CHECK: ldff1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x float> @ldff1w_f32(<vscale x 4 x i1> %pg, float* %a) {
+; CHECK-LABEL: ldff1w_f32:
+; CHECK: ldff1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, float* %a)
+  ret <vscale x 4 x float> %load
+}
+
+define <vscale x 2 x float> @ldff1w_2f32(<vscale x 2 x i1> %pg, float* %a) {
+; CHECK-LABEL: ldff1w_2f32:
+; CHECK: ldff1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, float* %a)
+  ret <vscale x 2 x float> %load
+}
+
+;
+; LDFF1SW
+;
+
+define <vscale x 2 x i64> @ldff1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldff1sw_d:
+; CHECK: ldff1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1D
+;
+
+define <vscale x 2 x i64> @ldff1d(<vscale x 2 x i1> %pg, i64* %a) {
+; CHECK-LABEL: ldff1d:
+; CHECK: ldff1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, i64* %a)
+  ret <vscale x 2 x i64> %load
+}
+
+
+define <vscale x 2 x double> @ldff1d_f64(<vscale x 2 x i1> %pg, double* %a) {
+; CHECK-LABEL: ldff1d_f64:
+; CHECK: ldff1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, double* %a)
+  ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1>, i8*)
+
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1>, half*)
+
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1>, i8*)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1>, float*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1>, float*)
+
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1>, i8*)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1>, i16*)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1>, double*)
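
Note for reviewers (not part of the patch): the sketch below shows the kind of source-level idiom that first-faulting loads exist to support, written against the ACLE builtins in arm_sve.h, which Clang is expected to lower onto the llvm.aarch64.sve.ldff1.* intrinsics added above. The function name sve_strlen and the exact loop shape are illustrative assumptions only, not code from this patch.

// Illustrative only: a strlen-style loop that reads ahead of the terminator
// with a first-faulting load and uses the FFR to see which lanes were loaded.
// Requires a compiler providing arm_sve.h, e.g. built with -march=armv8-a+sve.
#include <arm_sve.h>
#include <stddef.h>

size_t sve_strlen(const char *s) {
  size_t len = 0;
  svbool_t all = svptrue_b8();
  for (;;) {
    svsetffr();                                      // reset the first-fault register
    svint8_t chunk = svldff1_s8(all, (const int8_t *)(s + len));
    svbool_t loaded = svrdffr();                     // lanes that actually loaded
    svbool_t is_nul = svcmpeq_n_s8(loaded, chunk, 0);
    if (svptest_any(loaded, is_nul)) {
      // Terminator found: count the loaded bytes that precede it.
      svbool_t before_nul = svbrkb_b_z(loaded, is_nul);
      return len + svcntp_b8(loaded, before_nul);
    }
    // No terminator yet: advance by however many lanes were loaded. For a
    // valid string the first lane always loads, so the loop makes progress.
    len += svcntp_b8(all, loaded);
  }
}

The point of the example is the svsetffr/svldff1/svrdffr sequence: the FFR read-back is what makes the speculative read-ahead safe, and it is why the patch has to be careful about how the FFR use is represented before expansion (the pseudo added in SVEInstrFormats.td).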