diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1176,6 +1176,7 @@
 
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
+def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
 
 //
 // Stores
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -217,6 +217,8 @@
 
   LDNF1,
   LDNF1S,
+  LDFF1,
+  LDFF1S,
 
   // Unsigned gather loads.
   GLD1,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1375,6 +1375,8 @@
   case AArch64ISD::PTRUE:             return "AArch64ISD::PTRUE";
   case AArch64ISD::LDNF1:             return "AArch64ISD::LDNF1";
   case AArch64ISD::LDNF1S:            return "AArch64ISD::LDNF1S";
+  case AArch64ISD::LDFF1:             return "AArch64ISD::LDFF1";
+  case AArch64ISD::LDFF1S:            return "AArch64ISD::LDFF1S";
   case AArch64ISD::GLD1:              return "AArch64ISD::GLD1";
   case AArch64ISD::GLD1_SCALED:       return "AArch64ISD::GLD1_SCALED";
   case AArch64ISD::GLD1_SXTW:         return "AArch64ISD::GLD1_SXTW";
@@ -10237,6 +10239,7 @@
   // perfect candidates for combining.
   switch (Src->getOpcode()) {
   case AArch64ISD::LDNF1:
+  case AArch64ISD::LDFF1:
     MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
     break;
   case AArch64ISD::GLD1:
@@ -11298,7 +11301,7 @@
                               ISD::UNINDEXED, false, false);
 }
 
-static SDValue performLDNF1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performLDNF1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
 
@@ -11315,7 +11318,7 @@
                    N->getOperand(3), // Base
                    DAG.getValueType(VT) };
 
-  SDValue Load = DAG.getNode(AArch64ISD::LDNF1, DL, VTs, Ops);
+  SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
   SDValue LoadChain = SDValue(Load.getNode(), 1);
 
   if (ContainerVT.isInteger() && (VT != ContainerVT))
@@ -12571,6 +12574,10 @@
     NewOpc = AArch64ISD::LDNF1S;
     MemVTOpNum = 3;
     break;
+  case AArch64ISD::LDFF1:
+    NewOpc = AArch64ISD::LDFF1S;
+    MemVTOpNum = 3;
+    break;
   case AArch64ISD::GLD1:
     NewOpc = AArch64ISD::GLD1S;
     break;
@@ -12706,7 +12713,9 @@
   case Intrinsic::aarch64_sve_ldnt1:
     return performLDNT1Combine(N, DAG);
   case Intrinsic::aarch64_sve_ldnf1:
-    return performLDNF1Combine(N, DAG);
+    return performLDNF1Combine(N, DAG, AArch64ISD::LDNF1);
+  case Intrinsic::aarch64_sve_ldff1:
+    return performLDNF1Combine(N, DAG, AArch64ISD::LDFF1);
   case Intrinsic::aarch64_sve_stnt1:
     return performSTNT1Combine(N, DAG);
   case Intrinsic::aarch64_sve_ld1_gather:
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -549,13 +549,6 @@
 
 def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
 
-def SDT_AArch64_LDNF1 : SDTypeProfile<1, 3, [
-  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>,
-  SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
-]>;
-
-def AArch64ldnf1 : SDNode<"AArch64ISD::LDNF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
-
 //===----------------------------------------------------------------------===//
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -10,6 +10,11 @@
 //
 //===----------------------------------------------------------------------===//
 
+def SDT_AArch64_LDNF1 : SDTypeProfile<1, 3, [
+  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>,
+  SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
+]>;
+
 def SDT_AArch64_GLD1 : SDTypeProfile<1, 4, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>, SDTCisVec<3>, SDTCisVT<4, OtherVT>,
   SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
@@ -38,6 +43,8 @@
 def AArch64st1_scatter_sxtw_scaled : SDNode<"AArch64ISD::SST1_SXTW_SCALED", SDT_AArch64_SST1, [SDNPHasChain, SDNPMayStore, SDNPOptInGlue]>;
 def AArch64st1_scatter_imm         : SDNode<"AArch64ISD::SST1_IMM", SDT_AArch64_SST1_IMM, [SDNPHasChain, SDNPMayStore, SDNPOptInGlue]>;
 
+def AArch64ldnf1 : SDNode<"AArch64ISD::LDNF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ldff1 : SDNode<"AArch64ISD::LDFF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1_gather         : SDNode<"AArch64ISD::GLD1", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1_gather_scaled  : SDNode<"AArch64ISD::GLD1_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1_gather_uxtw    : SDNode<"AArch64ISD::GLD1_UXTW", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
@@ -58,6 +65,7 @@
 def sve_cntd_imm_neg : ComplexPattern<i32, 1, "SelectCntImm<-16, -1, 2, true>">;
 
 def AArch64ldnf1s : SDNode<"AArch64ISD::LDNF1S", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ldff1s : SDNode<"AArch64ISD::LDFF1S", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather         : SDNode<"AArch64ISD::GLD1S", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather_scaled  : SDNode<"AArch64ISD::GLD1S_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1s_gather_uxtw    : SDNode<"AArch64ISD::GLD1S_UXTW", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
@@ -1340,6 +1348,40 @@
   // 16-element contiguous non-faulting loads
   defm : ldnf1<LDNF1B_IMM,    nxv16i8,  AArch64ldnf1,  nxv16i1, nxv16i8>;
 
+  multiclass ldff1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
+    // Add more complex addressing modes here as required.
+    // Base
+    def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
+              (I PPR:$gp, GPR64sp:$base, XZR)>;
+  }
+
+  // 2-element contiguous first faulting loads
+  defm : ldff1<LDFF1B_D,  nxv2i64, AArch64ldff1,  nxv2i1, nxv2i8>;
+  defm : ldff1<LDFF1SB_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i8>;
+  defm : ldff1<LDFF1H_D,  nxv2i64, AArch64ldff1,  nxv2i1, nxv2i16>;
+  defm : ldff1<LDFF1SH_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i16>;
+  defm : ldff1<LDFF1W_D,  nxv2i64, AArch64ldff1,  nxv2i1, nxv2i32>;
+  defm : ldff1<LDFF1SW_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i32>;
+  defm : ldff1<LDFF1D,    nxv2i64, AArch64ldff1,  nxv2i1, nxv2i64>;
+  defm : ldff1<LDFF1W_D,  nxv2f32, AArch64ldff1,  nxv2i1, nxv2f32>;
+  defm : ldff1<LDFF1D,    nxv2f64, AArch64ldff1,  nxv2i1, nxv2f64>;
+
+  // 4-element contiguous first faulting loads
+  defm : ldff1<LDFF1B_S,  nxv4i32, AArch64ldff1,  nxv4i1, nxv4i8>;
+  defm : ldff1<LDFF1SB_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i8>;
+  defm : ldff1<LDFF1H_S,  nxv4i32, AArch64ldff1,  nxv4i1, nxv4i16>;
+  defm : ldff1<LDFF1SH_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i16>;
+  defm : ldff1<LDFF1W,    nxv4i32, AArch64ldff1,  nxv4i1, nxv4i32>;
+  defm : ldff1<LDFF1W,    nxv4f32, AArch64ldff1,  nxv4i1, nxv4f32>;
+
+  // 8-element contiguous first faulting loads
+  defm : ldff1<LDFF1B_H,  nxv8i16, AArch64ldff1,  nxv8i1, nxv8i8>;
+  defm : ldff1<LDFF1SB_H, nxv8i16, AArch64ldff1s, nxv8i1, nxv8i8>;
+  defm : ldff1<LDFF1H,    nxv8i16, AArch64ldff1,  nxv8i1, nxv8i16>;
+  defm : ldff1<LDFF1H,    nxv8f16, AArch64ldff1,  nxv8i1, nxv8f16>;
+
+  // 16-element contiguous first faulting loads
+  defm : ldff1<LDFF1B,    nxv16i8, AArch64ldff1,  nxv16i1, nxv16i8>;
 }
 
 let Predicates = [HasSVE2] in {
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5780,6 +5780,13 @@
 
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn]",
                   (!cast<Instruction>(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, XZR), 0>;
+
+  // We need a layer of indirection because early machine code passes balk at
+  // physical register (i.e. FFR) uses that have no previous definition.
+  let hasSideEffects = 1, hasNoSchedulingInfo = 1 in {
+  def "" : Pseudo<(outs listty:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, gprty:$Rm), []>,
+           PseudoInstExpansion<(!cast<Instruction>(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, gprty:$Rm)>;
+  }
 }
 
 multiclass sve_mem_cldnf_si<bits<4> dtype, string asm, RegisterOperand listty,
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
@@ -0,0 +1,220 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; LDFF1B
+;
+
+define <vscale x 16 x i8> @ldff1b(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b:
+; CHECK: ldff1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 8 x i16> @ldff1b_h(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b_h:
+; CHECK: ldff1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @ldff1b_s(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b_s:
+; CHECK: ldff1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1b_d(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1b_d:
+; CHECK: ldff1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1SB
+;
+
+define <vscale x 8 x i16> @ldff1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1sb_h:
+; CHECK: ldff1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @ldff1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1sb_s:
+; CHECK: ldff1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldff1sb_d:
+; CHECK: ldff1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1H
+;
+
+define <vscale x 8 x i16> @ldff1h(<vscale x 8 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1h:
+; CHECK: ldff1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, i16* %a)
+  ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 4 x i32> @ldff1h_s(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1h_s:
+; CHECK: ldff1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1h_d(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1h_d:
+; CHECK: ldff1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x half> @ldff1h_f16(<vscale x 8 x i1> %pg, half* %a) {
+; CHECK-LABEL: ldff1h_f16:
+; CHECK: ldff1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, half* %a)
+  ret <vscale x 8 x half> %load
+}
+
+;
+; LDFF1SH
+;
+
+define <vscale x 4 x i32> @ldff1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1sh_s:
+; CHECK: ldff1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ldff1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldff1sh_d:
+; CHECK: ldff1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1W
+;
+
+define <vscale x 4 x i32> @ldff1w(<vscale x 4 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldff1w:
+; CHECK: ldff1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, i32* %a)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 2 x i64> @ldff1w_d(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldff1w_d:
+; CHECK: ldff1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x float> @ldff1w_f32(<vscale x 4 x i1> %pg, float* %a) {
+; CHECK-LABEL: ldff1w_f32:
+; CHECK: ldff1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, float* %a)
+  ret <vscale x 4 x float> %load
+}
+
+define <vscale x 2 x float> @ldff1w_2f32(<vscale x 2 x i1> %pg, float* %a) {
+; CHECK-LABEL: ldff1w_2f32:
+; CHECK: ldff1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, float* %a)
+  ret <vscale x 2 x float> %load
+}
+
+;
+; LDFF1SW
+;
+
+define <vscale x 2 x i64> @ldff1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldff1sw_d:
+; CHECK: ldff1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LDFF1D
+;
+
+define <vscale x 2 x i64> @ldff1d(<vscale x 2 x i1> %pg, i64* %a) {
+; CHECK-LABEL: ldff1d:
+; CHECK: ldff1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, i64* %a)
+  ret <vscale x 2 x i64> %load
+}
+
+
+define <vscale x 2 x double> @ldff1d_f64(<vscale x 2 x i1> %pg, double* %a) {
+; CHECK-LABEL: ldff1d_f64:
+; CHECK: ldff1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, double* %a)
+  ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1>, i8*)
+
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1>, half*)
+
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1>, i8*)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1>, float*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1>, float*)
+
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1>, i8*)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1>, i16*)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1>, double*)
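
Usage sketch (illustrative only, not part of the patch): the new llvm.aarch64.sve.ldff1.* intrinsic is what ACLE first-faulting loads such as svldff1_u8 lower to, and this patch is what lets the AArch64 backend select it as an LDFF1* instruction. The C below shows the typical FFR-based consumption pattern; the ACLE spellings used here (svptrue_b8, svsetffr, svldff1_u8, svrdffr, svcntp_b8) are assumptions about arm_sve.h rather than anything added by this change.

#include <arm_sve.h>
#include <stdint.h>

// Count how many leading byte lanes at 'base' can be read without faulting.
// A minimal sketch assuming an SVE-enabled toolchain providing arm_sve.h.
uint64_t count_readable_lanes(const uint8_t *base) {
  svbool_t all = svptrue_b8();             // all-true byte predicate
  svsetffr();                              // reset the first-fault register (FFR)
  svuint8_t data = svldff1_u8(all, base);  // first-faulting contiguous load
  (void)data;                              // the loaded data is unused in this sketch
  svbool_t loaded = svrdffr();             // FFR marks the lanes that really loaded
  return svcntp_b8(all, loaded);           // number of successfully loaded lanes
}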