Index: lib/Transforms/Vectorize/SLPVectorizer.cpp =================================================================== --- lib/Transforms/Vectorize/SLPVectorizer.cpp +++ lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -439,6 +439,9 @@ /// \returns true if the memory operations A and B are consecutive. bool isConsecutiveAccess(Value *A, Value *B); + void reorderAltShuffleOperands(ArrayRef<Value *> VL, + SmallVectorImpl<Value *> &Left, + SmallVectorImpl<Value *> &Right); /// \brief Perform LICM and CSE on the newly generated gather sequences. void optimizeGatherSequence(); @@ -1381,6 +1384,16 @@ } newTreeEntry(VL, true); DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); + + // Reorder operands if reordering would enable vectorization. + if (isa<BinaryOperator>(VL0)) { + ValueList Left, Right; + reorderAltShuffleOperands(VL, Left, Right); + buildTree_rec(Left, Depth + 1); + buildTree_rec(Right, Depth + 1); + return; + } + for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { ValueList Operands; // Prepare the operand vector. @@ -1818,6 +1831,33 @@ return X == PtrSCEVB; } +void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, + SmallVectorImpl<Value *> &Left, + SmallVectorImpl<Value *> &Right) { + + // Push left and right operands of binary operation into Left and Right + for (unsigned i = 0, e = VL.size(); i < e; ++i) { + Left.push_back(cast<Instruction>(VL[i])->getOperand(0)); + Right.push_back(cast<Instruction>(VL[i])->getOperand(1)); + } + + // Reorder if we have a commutative operation and consecutive access + // are on either side of the alternate instructions.
+ for (unsigned j = 0; j < VL.size() - 1; ++j) { + if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { + if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { + Instruction *VL1 = cast<Instruction>(VL[j]); + Instruction *VL2 = cast<Instruction>(VL[j + 1]); + if (isConsecutiveAccess(L, L1) && VL1->isCommutative()) + std::swap(Left[j], Right[j]); + else if (isConsecutiveAccess(L, L1) && VL2->isCommutative()) + std::swap(Left[j + 1], Right[j + 1]); + // else unchanged + } + } + } +} + void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) { Instruction *VL0 = cast<Instruction>(VL[0]); BasicBlock::iterator NextInst = VL0; @@ -2214,9 +2254,13 @@ } case Instruction::ShuffleVector: { ValueList LHSVL, RHSVL; - for (int i = 0, e = E->Scalars.size(); i < e; ++i) { - LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); - RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); + if (isa<BinaryOperator>(VL0)) + reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL); + else { + for (int i = 0, e = E->Scalars.size(); i < e; ++i) { + LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); + RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); + } } setInsertPointAfterBundle(E->Scalars); Index: test/Transforms/SLPVectorizer/X86/addsub.ll =================================================================== --- test/Transforms/SLPVectorizer/X86/addsub.ll +++ test/Transforms/SLPVectorizer/X86/addsub.ll @@ -10,6 +10,7 @@ @fb = common global [4 x float] zeroinitializer, align 16 @fc = common global [4 x float] zeroinitializer, align 16 @fa = common global [4 x float] zeroinitializer, align 16 +@fd = common global [4 x float] zeroinitializer, align 16 ; CHECK-LABEL: @addsub ; CHECK: %5 = add nsw <4 x i32> %3, %4 @@ -177,5 +178,90 @@ ret void } + +; CHECK-LABEL: @reorder_alt +; CHECK: %3 = fadd <4 x float> %1, %2 +; CHECK: %4 = fsub <4 x float> %1, %2 +; CHECK: %5 = shufflevector <4 x float> %3, <4 x float> %4, <4 x i32> <i32 0, i32 5, i32 2, i32 7> +define void @reorder_alt() #0 { + %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4 + %2 = load float* getelementptr
inbounds ([4 x float]* @fa, i32 0, i64 0), align 4 + %3 = fadd float %1, %2 + store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4 + %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4 + %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4 + %6 = fsub float %4, %5 + store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4 + %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4 + %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4 + %9 = fadd float %7, %8 + store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4 + %10 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4 + %11 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4 + %12 = fsub float %10, %11 + store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4 + ret void +} + +; CHECK-LABEL: @reorder_alt_subTree +; CHECK: %4 = fsub <4 x float> %3, %2 +; CHECK: %5 = fadd <4 x float> %3, %2 +; CHECK: %6 = shufflevector <4 x float> %4, <4 x float> %5, <4 x i32> <i32 0, i32 5, i32 2, i32 7> +; CHECK: %7 = fadd <4 x float> %1, %6 +; CHECK: %8 = fsub <4 x float> %1, %6 +; CHECK: %9 = shufflevector <4 x float> %7, <4 x float> %8, <4 x i32> <i32 0, i32 5, i32 2, i32 7> +define void @reorder_alt_subTree() #0 { + %1 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4 + %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4 + %3 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 0), align 4 + %4 = fsub float %2, %3 + %5 = fadd float %1, %4 + store float %5, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4 + %6 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4 + %7 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4 + %8 = load float*
getelementptr inbounds ([4 x float]* @fd, i32 0, i64 1), align 4 + %9 = fadd float %7, %8 + %10 = fsub float %6, %9 + store float %10, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4 + %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4 + %12 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4 + %13 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 2), align 4 + %14 = fsub float %12, %13 + %15 = fadd float %11, %14 + store float %15, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4 + %16 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4 + %17 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 3), align 4 + %18 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4 + %19 = fadd float %17, %18 + %20 = fsub float %16, %19 + store float %20, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4 + ret void +} + +; CHECK-LABEL: @no_vec_shuff_reorder +; CHECK-NOT: fadd <4 x float> +; CHECK-NOT: fsub <4 x float> +; CHECK-NOT: shufflevector +define void @no_vec_shuff_reorder() #0 { + %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4 + %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4 + %3 = fadd float %1, %2 + store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4 + %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4 + %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4 + %6 = fsub float %4, %5 + store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4 + %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4 + %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4 + %9 = fadd float %7, %8 + store float %9, float* getelementptr 
inbounds ([4 x float]* @fc, i32 0, i64 2), align 4 + %10 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4 + %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4 + %12 = fsub float %10, %11 + store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4 + ret void +} + + attributes #0 = { nounwind }