Index: llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp =================================================================== --- llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -1793,7 +1793,10 @@ DEBUG(dbgs() << "SLP: Check whether the tree with height " << VectorizableTree.size() << " is fully vectorizable .\n"); - // We only handle trees of height 2. + // We only handle trees of heights 1 and 2. + if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather) + return true; + if (VectorizableTree.size() != 2) return false; @@ -4165,12 +4168,21 @@ // Visit left or right. Value *NextV = TreeN->getOperand(EdgeToVist); - // We currently only allow BinaryOperator's and SelectInst's as reduction - // values in our tree. - if (isa<BinaryOperator>(NextV) || isa<SelectInst>(NextV)) - Stack.push_back(std::make_pair(cast<Instruction>(NextV), 0)); - else if (NextV != Phi) + if (NextV != Phi) { + auto *I = dyn_cast<Instruction>(NextV); + // Continue analysis if the next operand is a reduction operation or + // (possibly) a reduced value. If the reduced value opcode is not set, + // the first met operation != reduction operation is considered as the + // reduced value class. 
+ if (I && (!ReducedValueOpcode || I->getOpcode() == ReducedValueOpcode || + I->getOpcode() == ReductionOpcode)) { + if (!ReducedValueOpcode && I->getOpcode() != ReductionOpcode) + ReducedValueOpcode = I->getOpcode(); + Stack.push_back(std::make_pair(I, 0)); + continue; + } return false; + } } return true; } @@ -4571,8 +4583,10 @@ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(RI->getOperand(0))) { DEBUG(dbgs() << "SLP: Found a return to vectorize.\n"); - if (tryToVectorizePair(BinOp->getOperand(0), - BinOp->getOperand(1), R)) { + if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI, + R.getMinVecRegSize()) || + tryToVectorizePair(BinOp->getOperand(0), BinOp->getOperand(1), + R)) { Changed = true; it = BB->begin(); e = BB->end(); Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll =================================================================== --- llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll +++ llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll @@ -0,0 +1,48 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -slp-vectorizer -slp-vectorize-hor -S -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 < %s | FileCheck %s + +; int test(unsigned int *p) { +; int sum = 0; +; for (int i = 0; i < 8; i++) +; sum += p[i]; +; return sum; +; } + +define i32 @test(i32* nocapture readonly %p) { +; CHECK-LABEL: @test( +; CHECK: [[BC:%.*]] = bitcast i32* %p to <8 x i32>* +; CHECK-NEXT: [[LD:%.*]] = load <8 x i32>, <8 x i32>* [[BC]], align 4 +; CHECK: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[LD]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[LD]], [[RDX_SHUF]] +; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> +; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]] +; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef> +; CHECK-NEXT: [[BIN_RDX4:%.*]] 
= add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]] +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0 +; CHECK: ret i32 [[TMP2]] +; +entry: + %0 = load i32, i32* %p, align 4 + %arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1 + %1 = load i32, i32* %arrayidx.1, align 4 + %mul.18 = add i32 %1, %0 + %arrayidx.2 = getelementptr inbounds i32, i32* %p, i64 2 + %2 = load i32, i32* %arrayidx.2, align 4 + %mul.29 = add i32 %2, %mul.18 + %arrayidx.3 = getelementptr inbounds i32, i32* %p, i64 3 + %3 = load i32, i32* %arrayidx.3, align 4 + %mul.310 = add i32 %3, %mul.29 + %arrayidx.4 = getelementptr inbounds i32, i32* %p, i64 4 + %4 = load i32, i32* %arrayidx.4, align 4 + %mul.411 = add i32 %4, %mul.310 + %arrayidx.5 = getelementptr inbounds i32, i32* %p, i64 5 + %5 = load i32, i32* %arrayidx.5, align 4 + %mul.512 = add i32 %5, %mul.411 + %arrayidx.6 = getelementptr inbounds i32, i32* %p, i64 6 + %6 = load i32, i32* %arrayidx.6, align 4 + %mul.613 = add i32 %6, %mul.512 + %arrayidx.7 = getelementptr inbounds i32, i32* %p, i64 7 + %7 = load i32, i32* %arrayidx.7, align 4 + %mul.714 = add i32 %7, %mul.613 + ret i32 %mul.714 +} Index: llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll =================================================================== --- llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll +++ llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll @@ -1,20 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s -target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-apple-macosx10.9.0" - -;CHECK-LABEL: @foo -;CHECK: load <4 x i32> -;CHECK: load <4 x i32> -;CHECK: %[[S1:.+]] = add nsw <4 x i32> -;CHECK-DAG: store <4 x i32> %[[S1]] -;CHECK-DAG: %[[A1:.+]] = add nsw i32 -;CHECK-DAG: %[[A2:.+]] = add nsw i32 %[[A1]] -;CHECK-DAG: %[[A3:.+]] = add nsw i32 %[[A2]] 
-;CHECK-DAG: %[[A4:.+]] = add nsw i32 %[[A3]] -;CHECK: ret i32 %[[A4]] - define i32 @foo(i32* nocapture readonly %diff) #0 { +; CHECK-LABEL: @foo( +; CHECK: load <4 x i32> +; CHECK: load <4 x i32> +; CHECK: [[S1:%.+]] = add nsw <4 x i32> +; CHECK: store <4 x i32> [[S1]], +; CHECK: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[S1]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[S1]], [[RDX_SHUF]] +; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef> +; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]] +; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0 +; CHECK-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP15]], +; CHECK: ret i32 [[ADD52]] +; entry: %m2 = alloca [8 x [8 x i32]], align 16 %0 = bitcast [8 x [8 x i32]]* %m2 to i8*