diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1460,7 +1460,8 @@
       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
       setOperationAction(ISD::VECREDUCE_FMAXIMUM, VT, Custom);
       setOperationAction(ISD::VECREDUCE_FMINIMUM, VT, Custom);
-      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+      if (Subtarget->isFullSVEAvailable())
+        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
@@ -1520,9 +1521,12 @@
     setOperationAction(ISD::MUL, MVT::v1i64, Custom);
     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
 
-    // NEON doesn't support across-vector reductions, but SVE does.
-    for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
-      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+    if (Subtarget->isFullSVEAvailable()) {
+      // NEON doesn't support across-vector reductions, but SVE does.
+      for (auto VT :
+           {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
+        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+    }
 
     if (!Subtarget->isNeonAvailable()) {
       setTruncStoreAction(MVT::v2f32, MVT::v2f16, Custom);
@@ -1880,7 +1884,8 @@
   setOperationAction(ISD::VECREDUCE_FMAXIMUM, VT, Custom);
   setOperationAction(ISD::VECREDUCE_FMINIMUM, VT, Custom);
   setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
-  setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT,
+                     StreamingSVE ? Expand : Custom);
   setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
   setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
   setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
@@ -19547,7 +19552,9 @@
                        N->getOperand(3), DAG.getCondCode(ISD::SETUO));
     break;
   case Intrinsic::aarch64_sve_fadda:
-    return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
+    if (Subtarget->isFullSVEAvailable())
+      return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
+    break;
   case Intrinsic::aarch64_sve_faddv:
     return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG);
   case Intrinsic::aarch64_sve_fmaxnmv:
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -215,9 +215,18 @@
   /// mode, which disables NEON instructions).
   bool isNeonAvailable() const;
 
+  /// Returns true if the target has SVE and can use the full range of SVE
+  /// instructions, for example because the function is known not to be in
+  /// streaming-SVE mode, or because the target has FEAT_FA64 enabled.
+  bool isFullSVEAvailable() const;
+
+
   unsigned getMinVectorRegisterBitWidth() const {
-    // Don't assume any minimum vector size when PSTATE.SM may not be 0.
-    if (StreamingSVEMode || StreamingCompatibleSVEMode)
+    // Don't assume any minimum vector size when PSTATE.SM may not be 0,
+    // because we don't yet have streaming-compatible codegen that we trust
+    // to be safe for functions that may be executed in streaming-SVE mode.
+    // By returning '0' here, we disable vectorization.
+    if (!isFullSVEAvailable() && !isNeonAvailable())
       return 0;
     return MinVectorRegisterBitWidth;
   }
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -478,13 +478,13 @@
 bool AArch64Subtarget::useAA() const { return UseAA; }
 
 bool AArch64Subtarget::isNeonAvailable() const {
-  if (!hasNEON())
-    return false;
-
-  // The 'force-streaming-comaptible-sve' flag overrides the streaming
-  // function attributes.
-  if (ForceStreamingCompatibleSVE.getNumOccurrences() > 0)
-    return !ForceStreamingCompatibleSVE;
+  return hasNEON() && !isStreaming() && !isStreamingCompatible() &&
+         !ForceStreamingCompatibleSVE;
+}
 
-  return !isStreaming() && !isStreamingCompatible();
+bool AArch64Subtarget::isFullSVEAvailable() const {
+  // FIXME: Also return true when FEAT_FA64 is enabled, but we can't do this
+  // yet because LLVM doesn't yet support that feature.
+  return hasSVEorSME() && !StreamingSVEMode && !StreamingCompatibleSVEMode &&
+         !ForceStreamingCompatibleSVE;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1943,8 +1943,7 @@
 
     return TypeSize::getFixed(ST->hasNEON() ? 128 : 0);
   case TargetTransformInfo::RGK_ScalableVector:
-    if ((ST->isStreaming() || ST->isStreamingCompatible()) &&
-        !EnableScalableAutovecInStreamingMode)
+    if (!ST->isFullSVEAvailable() && !EnableScalableAutovecInStreamingMode)
       return TypeSize::getScalable(0);
 
     return TypeSize::getScalable(ST->hasSVE() ? 128 : 0);
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
--- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mattr=+sve < %s | FileCheck %s
 
-; FIXME: Streaming-compatible SVE doesn't include FADDA, so this shouldn't compile!
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; Streaming-compatible SVE doesn't include FADDA, so this shouldn't compile!
+; RUN: not --crash llc -mattr=+sve -force-streaming-compatible-sve < %s
 
 target triple = "aarch64-linux-gnu"
 
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-reduce-fadda.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-reduce-fadda.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-reduce-fadda.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-; FIXME: Streaming-compatible SVE doesn't include FADDA, so this shouldn't compile!
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; Streaming-compatible SVE doesn't include FADDA, so this shouldn't compile!
+; RUN: not --crash llc -mtriple=aarch64--linux-gnu -mattr=+sve -force-streaming-compatible-sve < %s
 
 ;
 ; FADDA
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -10,11 +10,14 @@
 define half @fadda_v4f16(half %start, <4 x half> %a) {
 ; CHECK-LABEL: fadda_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fadda h0, p0, h0, z1.h
-; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    mov z2.h, z1.h[1]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[2]
+; CHECK-NEXT:    mov z1.h, z1.h[3]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    fadd h0, h0, h1
 ; CHECK-NEXT:    ret
   %res = call half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a)
   ret half %res
@@ -23,11 +26,22 @@
 define half @fadda_v8f16(half %start, <8 x half> %a) {
 ; CHECK-LABEL: fadda_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fadda h0, p0, h0, z1.h
-; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    mov z2.h, z1.h[1]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[2]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[3]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[4]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[5]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[6]
+; CHECK-NEXT:    mov z1.h, z1.h[7]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    fadd h0, h0, h1
 ; CHECK-NEXT:    ret
   %res = call half @llvm.vector.reduce.fadd.v8f16(half %start, <8 x half> %a)
   ret half %res
@@ -36,12 +50,38 @@
 define half @fadda_v16f16(half %start, ptr %a) {
 ; CHECK-LABEL: fadda_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
-; CHECK-NEXT:    ptrue p0.h, vl8
-; CHECK-NEXT:    fadda h0, p0, h0, z1.h
-; CHECK-NEXT:    fadda h0, p0, h0, z2.h
-; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    mov z2.h, z1.h[1]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[2]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[3]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[4]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[5]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[6]
+; CHECK-NEXT:    mov z1.h, z1.h[7]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    mov z2.h, z1.h[1]
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[2]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[3]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[4]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[5]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    mov z2.h, z1.h[6]
+; CHECK-NEXT:    mov z1.h, z1.h[7]
+; CHECK-NEXT:    fadd h0, h0, h2
+; CHECK-NEXT:    fadd h0, h0, h1
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, ptr %a
   %res = call half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op)
@@ -51,11 +91,10 @@
 define float @fadda_v2f32(float %start, <2 x float> %a) {
 ; CHECK-LABEL: fadda_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
-; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fadda s0, p0, s0, z1.s
-; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov z1.s, z1.s[1]
+; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
   %res = call float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %a)
   ret float %res
@@ -64,11 +103,14 @@
 define float @fadda_v4f32(float %start, <4 x float> %a) {
 ; CHECK-LABEL: fadda_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fadda s0, p0, s0, z1.s
-; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov z2.s, z1.s[1]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    mov z2.s, z1.s[2]
+; CHECK-NEXT:    mov z1.s, z1.s[3]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
   %res = call float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %a)
   ret float %res
@@ -77,12 +119,22 @@
 define float @fadda_v8f32(float %start, ptr %a) {
 ; CHECK-LABEL: fadda_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
-; CHECK-NEXT:    ptrue p0.s, vl4
-; CHECK-NEXT:    fadda s0, p0, s0, z1.s
-; CHECK-NEXT:    fadda s0, p0, s0, z2.s
-; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov z2.s, z1.s[1]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    mov z2.s, z1.s[2]
+; CHECK-NEXT:    mov z1.s, z1.s[3]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    mov z2.s, z1.s[1]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    mov z2.s, z1.s[2]
+; CHECK-NEXT:    mov z1.s, z1.s[3]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, ptr %a
   %res = call float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op)
@@ -102,11 +154,10 @@
 define double @fadda_v2f64(double %start, <2 x double> %a) {
 ; CHECK-LABEL: fadda_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fadda d0, p0, d0, z1.d
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    mov z1.d, z1.d[1]
+; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
   %res = call double @llvm.vector.reduce.fadd.v2f64(double %start, <2 x double> %a)
   ret double %res
@@ -115,12 +166,14 @@
 define double @fadda_v4f64(double %start, ptr %a) {
 ; CHECK-LABEL: fadda_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    fadda d0, p0, d0, z1.d
-; CHECK-NEXT:    fadda d0, p0, d0, z2.d
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    mov z1.d, z1.d[1]
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    mov z1.d, z1.d[1]
+; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, ptr %a
   %res = call double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op)