diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -12701,6 +12701,9 @@
       return I->getFastMathFlags().noNaNs();
     }
 
+    if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum)
+      return true;
+
     return I->isAssociative();
   }
@@ -12751,6 +12754,18 @@
                                minnum(cast<ConstantFP>(LHS)->getValueAPF(),
                                       cast<ConstantFP>(RHS)->getValueAPF()));
       return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
+    case RecurKind::FMaximum:
+      if (IsConstant)
+        return ConstantFP::get(LHS->getType(),
+                               maximum(cast<ConstantFP>(LHS)->getValueAPF(),
+                                       cast<ConstantFP>(RHS)->getValueAPF()));
+      return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS);
+    case RecurKind::FMinimum:
+      if (IsConstant)
+        return ConstantFP::get(LHS->getType(),
+                               minimum(cast<ConstantFP>(LHS)->getValueAPF(),
+                                       cast<ConstantFP>(RHS)->getValueAPF()));
+      return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS);
     case RecurKind::SMax:
       if (IsConstant || UseSelect) {
         Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
@@ -12833,6 +12848,10 @@
     if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
       return RecurKind::FMin;
+    if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
+      return RecurKind::FMaximum;
+    if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
+      return RecurKind::FMinimum;
     // This matches either cmp+select or intrinsics. SLP is expected to handle
     // either form.
     // TODO: If we are canonicalizing to intrinsics, we can remove several
@@ -13800,6 +13819,8 @@
     }
     case RecurKind::FMax:
     case RecurKind::FMin:
+    case RecurKind::FMaximum:
+    case RecurKind::FMinimum:
     case RecurKind::SMax:
     case RecurKind::SMin:
     case RecurKind::UMax:
@@ -14131,6 +14152,10 @@
       return true;
     if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
       return true;
+    if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1))))
+      return true;
+    if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1))))
+      return true;
     if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
       return true;
     if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
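Note on the first two hunks (a reviewer sketch, not part of the patch): unlike llvm.maxnum/llvm.minnum, the llvm.maximum/llvm.minimum intrinsics propagate a NaN from either operand and order signed zeros (-0.0 < +0.0), so every re-association of the operands yields the same result. That is why isVectorizable can return true for FMaximum/FMinimum without the noNaNs() fast-math check required for FMax/FMin, and why the IsConstant path in createOp can fold directly through APFloat's maximum()/minimum(). A minimal standalone IR illustration (function names here are made up for the demo):

; maximum(maximum(%a, NaN), %b) and maximum(%a, maximum(NaN, %b)) are both NaN,
; so association does not matter even without the nnan flag.
define double @maximum_nan_demo(double %a, double %b) {
  %m1 = call double @llvm.maximum.f64(double %a, double 0x7FF8000000000000)
  %m2 = call double @llvm.maximum.f64(double %m1, double %b)
  ret double %m2
}

; Signed zeros are ordered: this folds to +0.0, matching what the constant
; path in createOp computes via APFloat's maximum().
define double @maximum_szero_demo() {
  %r = call double @llvm.maximum.f64(double -0.0, double 0.0)
  ret double %r
}

declare double @llvm.maximum.f64(double, double)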
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll b/llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256
+declare float @llvm.maximum.f32(float, float)
+declare float @llvm.minimum.f32(float, float)
+declare double @llvm.maximum.f64(double, double)
+declare double @llvm.minimum.f64(double, double)
+
+@srcA64 = common global [8 x double] zeroinitializer, align 64
+@srcB64 = common global [8 x double] zeroinitializer, align 64
+@srcC64 = common global [8 x double] zeroinitializer, align 64
+@srcA32 = common global [16 x float] zeroinitializer, align 64
+@srcB32 = common global [16 x float] zeroinitializer, align 64
+@srcC32 = common global [16 x float] zeroinitializer, align 64
+@dst64 = common global [8 x double] zeroinitializer, align 64
+@dst32 = common global [16 x float] zeroinitializer, align 64
+
+define void @fmaximum_2f64() {
+; SSE-LABEL: define void @fmaximum_2f64() {
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; SSE-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; SSE-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 8
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: define void @fmaximum_2f64
+; AVX-SAME: () #[[ATTR1:[0-9]+]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; AVX-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; AVX-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; AVX-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 8
+; AVX-NEXT:    ret void
+;
+  %a0 = load double, ptr @srcA64, align 8
+  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
+  %b0 = load double, ptr @srcB64, align 8
+  %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
+  %fmaximum0 = call double @llvm.maximum.f64(double %a0, double %b0)
+  %fmaximum1 = call double @llvm.maximum.f64(double %a1, double %b1)
+  store double %fmaximum0, ptr @dst64, align 8
+  store double %fmaximum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
+  ret void
+}
+
+define void @fmaximum_4f64() {
+; SSE-LABEL: define void @fmaximum_4f64() {
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; SSE-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; SSE-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 8
+; SSE-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[TMP6:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP5]])
+; SSE-NEXT:    store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: define void @fmaximum_4f64
+; AVX-SAME: () #[[ATTR1]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr @srcA64, align 8
+; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, ptr @srcB64, align 8
+; AVX-NEXT:    [[TMP3:%.*]] = call <4 x double> @llvm.maximum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
+; AVX-NEXT:    store <4 x double> [[TMP3]], ptr @dst64, align 8
+; AVX-NEXT:    ret void
+;
+  %a0 = load double, ptr @srcA64, align 8
+  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
+  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 8
+  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 3), align 8
+  %b0 = load double, ptr @srcB64, align 8
+  %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
+  %b2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 8
+  %b3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 3), align 8
+  %fmaximum0 = call double @llvm.maximum.f64(double %a0, double %b0)
+  %fmaximum1 = call double @llvm.maximum.f64(double %a1, double %b1)
+  %fmaximum2 = call double @llvm.maximum.f64(double %a2, double %b2)
+  %fmaximum3 = call double @llvm.maximum.f64(double %a3, double %b3)
+  store double %fmaximum0, ptr @dst64, align 8
+  store double %fmaximum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
+  store double %fmaximum2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
+  store double %fmaximum3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
+  ret void
+}
+
+define void @fmaximum_8f64() {
+; SSE-LABEL: define void @fmaximum_8f64() {
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 4
+; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 4
+; SSE-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; SSE-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 4
+; SSE-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 4
+; SSE-NEXT:    [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 4
+; SSE-NEXT:    [[TMP6:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP5]])
+; SSE-NEXT:    store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 4
+; SSE-NEXT:    [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
+; SSE-NEXT:    [[TMP8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
+; SSE-NEXT:    [[TMP9:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]])
+; SSE-NEXT:    store <2 x double> [[TMP9]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
+; SSE-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 6), align 4
+; SSE-NEXT:    [[TMP11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 6), align 4
+; SSE-NEXT:    [[TMP12:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP10]], <2 x double> [[TMP11]])
+; SSE-NEXT:    store <2 x double> [[TMP12]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: define void @fmaximum_8f64
+; AVX-SAME: () #[[ATTR1]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr @srcA64, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, ptr @srcB64, align 4
+; AVX-NEXT:    [[TMP3:%.*]] = call <4 x double> @llvm.maximum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
+; AVX-NEXT:    store <4 x double> [[TMP3]], ptr @dst64, align 4
+; AVX-NEXT:    [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
+; AVX-NEXT:    [[TMP5:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
+; AVX-NEXT:    [[TMP6:%.*]] = call <4 x double> @llvm.maximum.v4f64(<4 x double> [[TMP4]], <4 x double> [[TMP5]])
+; AVX-NEXT:    store <4 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
+; AVX-NEXT:    ret void
+;
+  %a0 = load double, ptr @srcA64, align 4
+  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 4
+  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 4
+  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 3), align 4
+  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
+  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 5), align 4
+  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 6), align 4
+  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 7), align 4
+  %b0 = load double, ptr @srcB64, align 4
+  %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 4
+  %b2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 4
+  %b3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 3), align 4
+  %b4 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
+  %b5 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 5), align 4
+  %b6 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 6), align 4
+  %b7 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 7), align 4
+  %fmaximum0 = call double @llvm.maximum.f64(double %a0, double %b0)
+  %fmaximum1 = call double @llvm.maximum.f64(double %a1, double %b1)
+  %fmaximum2 = call double @llvm.maximum.f64(double %a2, double %b2)
+  %fmaximum3 = call double @llvm.maximum.f64(double %a3, double %b3)
+  %fmaximum4 = call double @llvm.maximum.f64(double %a4, double %b4)
+  %fmaximum5 = call double @llvm.maximum.f64(double %a5, double %b5)
+  %fmaximum6 = call double @llvm.maximum.f64(double %a6, double %b6)
+  %fmaximum7 = call double @llvm.maximum.f64(double %a7, double %b7)
+  store double %fmaximum0, ptr @dst64, align 4
+  store double %fmaximum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 4
+  store double %fmaximum2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 4
+  store double %fmaximum3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 4
+  store double %fmaximum4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
+  store double %fmaximum5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 4
+  store double %fmaximum6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 4
+  store double %fmaximum7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 4
+  ret void
+}
+
+define float @reduction_v4f32(ptr %p) {
+; SSE-LABEL: define float @reduction_v4f32
+; SSE-SAME: (ptr [[P:%.*]]) {
+; SSE-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P]], align 4
+; SSE-NEXT:    [[TMP2:%.*]] = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> [[TMP1]])
+; SSE-NEXT:    ret float [[TMP2]]
+;
+; AVX-LABEL: define float @reduction_v4f32
+; AVX-SAME: (ptr [[P:%.*]]) #[[ATTR1]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P]], align 4
+; AVX-NEXT:    [[TMP2:%.*]] = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> [[TMP1]])
+; AVX-NEXT:    ret float [[TMP2]]
+;
+  %g1 = getelementptr inbounds float, ptr %p, i64 1
+  %g2 = getelementptr inbounds float, ptr %p, i64 2
+  %g3 = getelementptr inbounds float, ptr %p, i64 3
+  %t0 = load float, ptr %p, align 4
+  %t1 = load float, ptr %g1, align 4
+  %t2 = load float, ptr %g2, align 4
+  %t3 = load float, ptr %g3, align 4
+  %m1 = tail call float @llvm.maximum.f32(float %t1, float %t0)
+  %m2 = tail call float @llvm.maximum.f32(float %t2, float %m1)
+  %m3 = tail call float @llvm.maximum.f32(float %t3, float %m2)
+  ret float %m3
+}
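The two reduction tests around this point exercise the new getRdxKind/matchRdxBop matching: a chain of scalar llvm.maximum/llvm.minimum calls is recognized as a horizontal reduction and folded into the corresponding llvm.vector.reduce.fmaximum/fminimum intrinsic. As a reference-only sketch (not part of the test file; the function name is made up for illustration), the emitted reduce intrinsic is semantically a tree of lane-wise maximum calls:

define float @reduce_fmaximum_v4f32_ref(<4 x float> %v) {
  ; Equivalent scalar expansion of llvm.vector.reduce.fmaximum.v4f32; any
  ; association of the lanes is fine because maximum is fully associative.
  %e0 = extractelement <4 x float> %v, i32 0
  %e1 = extractelement <4 x float> %v, i32 1
  %e2 = extractelement <4 x float> %v, i32 2
  %e3 = extractelement <4 x float> %v, i32 3
  %m01 = call float @llvm.maximum.f32(float %e0, float %e1)
  %m23 = call float @llvm.maximum.f32(float %e2, float %e3)
  %r = call float @llvm.maximum.f32(float %m01, float %m23)
  ret float %r
}

declare float @llvm.maximum.f32(float, float)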
+
+define double @reduction_v4f64_fminimum(ptr %p) {
+; SSE-LABEL: define double @reduction_v4f64_fminimum
+; SSE-SAME: (ptr [[P:%.*]]) {
+; SSE-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr [[P]], align 4
+; SSE-NEXT:    [[TMP2:%.*]] = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> [[TMP1]])
+; SSE-NEXT:    ret double [[TMP2]]
+;
+; AVX-LABEL: define double @reduction_v4f64_fminimum
+; AVX-SAME: (ptr [[P:%.*]]) #[[ATTR1]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr [[P]], align 4
+; AVX-NEXT:    [[TMP2:%.*]] = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> [[TMP1]])
+; AVX-NEXT:    ret double [[TMP2]]
+;
+  %g1 = getelementptr inbounds double, ptr %p, i64 1
+  %g2 = getelementptr inbounds double, ptr %p, i64 2
+  %g3 = getelementptr inbounds double, ptr %p, i64 3
+  %t0 = load double, ptr %p, align 4
+  %t1 = load double, ptr %g1, align 4
+  %t2 = load double, ptr %g2, align 4
+  %t3 = load double, ptr %g3, align 4
+  %m1 = tail call double @llvm.minimum.f64(double %t1, double %t0)
+  %m2 = tail call double @llvm.minimum.f64(double %t2, double %m1)
+  %m3 = tail call double @llvm.minimum.f64(double %t3, double %m2)
+  ret double %m3
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX256: {{.*}}
+; CHECK: {{.*}}
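A maintenance note: the CHECK lines in this test are autogenerated (the header records utils/update_test_checks.py with UTC_ARGS: --version 2), so if the vectorizer's output changes they should be regenerated by rerunning that script over this file rather than edited by hand. The trailing NOTE block listing the unused AVX256/CHECK prefixes is emitted by the script and is expected.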