Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1154,7 +1154,8 @@
       DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
       return;
     }
-    // Check if the loads are consecutive or of we need to swizzle them.
+
+    // Check if the loads are consecutive or reversed.
     for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
@@ -1163,17 +1164,31 @@
         DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
         return;
       }
+    }
 
-      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
-        if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
-          ++NumLoadsWantToChangeOrder;
+    // TODO: What we really want is to sort the loads, but for now, check
+    // the two likely directions.
+    bool Consecutive = true;
+    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
+      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE))
+        Consecutive = false;
+
+    if (!Consecutive) {
+      for (unsigned i = VL.size() - 1; i > 0; --i) {
+        if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
+          BS.cancelScheduling(VL);
+          newTreeEntry(VL, false);
+          DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+          return;
         }
-        BS.cancelScheduling(VL);
-        newTreeEntry(VL, false);
-        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
-        return;
       }
+      ++NumLoadsWantToChangeOrder;
+      BS.cancelScheduling(VL);
+      newTreeEntry(VL, false);
+      DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
+      return;
     }
+
     ++NumLoadsWantToKeepOrder;
     newTreeEntry(VL, true);
     DEBUG(dbgs() << "SLP: added a vector of loads.\n");
@@ -3798,8 +3813,7 @@
         BuildVectorSlice = BuildVector.slice(i, OpsWidth);
 
       R.buildTree(Ops, BuildVectorSlice);
-      // TODO: check if we can allow reordering also for other cases than
-      // tryToVectorizePair()
+      // TODO: check if we can allow reordering for more cases.
       if (allowReorder && R.shouldReorder()) {
         assert(Ops.size() == 2);
         assert(BuildVectorSlice.empty());
@@ -4078,7 +4092,12 @@
 
     unsigned i = 0;
     for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
-      V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
+      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
+      V.buildTree(VL, ReductionOps);
+      if (V.shouldReorder()) {
+        SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
+        V.buildTree(Reversed, ReductionOps);
+      }
       V.computeMinimumValueSizes();
 
       // Estimate cost.
Index: test/Transforms/SLPVectorizer/X86/reduction_loads.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/reduction_loads.ll
+++ test/Transforms/SLPVectorizer/X86/reduction_loads.ll
@@ -0,0 +1,49 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s
+
+; CHECK-LABEL: @test
+; CHECK: %0 = bitcast i32* %p to <8 x i32>*
+; CHECK: %1 = load <8 x i32>, <8 x i32>* %0, align 4
+; CHECK: %2 = mul <8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, %1
+
+define i32 @test(i32* nocapture readonly %p) {
+entry:
+  %arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
+  %arrayidx.2 = getelementptr inbounds i32, i32* %p, i64 2
+  %arrayidx.3 = getelementptr inbounds i32, i32* %p, i64 3
+  %arrayidx.4 = getelementptr inbounds i32, i32* %p, i64 4
+  %arrayidx.5 = getelementptr inbounds i32, i32* %p, i64 5
+  %arrayidx.6 = getelementptr inbounds i32, i32* %p, i64 6
+  %arrayidx.7 = getelementptr inbounds i32, i32* %p, i64 7
+  br label %for.body
+
+for.body:
+  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
+  %tmp = load i32, i32* %p, align 4
+  %mul = mul i32 %tmp, 42
+  %add = add i32 %mul, %sum
+  %tmp5 = load i32, i32* %arrayidx.1, align 4
+  %mul.1 = mul i32 %tmp5, 42
+  %add.1 = add i32 %mul.1, %add
+  %tmp6 = load i32, i32* %arrayidx.2, align 4
+  %mul.2 = mul i32 %tmp6, 42
+  %add.2 = add i32 %mul.2, %add.1
+  %tmp7 = load i32, i32* %arrayidx.3, align 4
+  %mul.3 = mul i32 %tmp7, 42
+  %add.3 = add i32 %mul.3, %add.2
+  %tmp8 = load i32, i32* %arrayidx.4, align 4
+  %mul.4 = mul i32 %tmp8, 42
+  %add.4 = add i32 %mul.4, %add.3
+  %tmp9 = load i32, i32* %arrayidx.5, align 4
+  %mul.5 = mul i32 %tmp9, 42
+  %add.5 = add i32 %mul.5, %add.4
+  %tmp10 = load i32, i32* %arrayidx.6, align 4
+  %mul.6 = mul i32 %tmp10, 42
+  %add.6 = add i32 %mul.6, %add.5
+  %tmp11 = load i32, i32* %arrayidx.7, align 4
+  %mul.7 = mul i32 %tmp11, 42
+  %add.7 = add i32 %mul.7, %add.6
+  br i1 true, label %for.end, label %for.body
+
+for.end:
+  ret i32 %add.7
+}