Index: include/llvm/Transforms/Vectorize/SLPVectorizer.h =================================================================== --- include/llvm/Transforms/Vectorize/SLPVectorizer.h +++ include/llvm/Transforms/Vectorize/SLPVectorizer.h @@ -82,8 +82,8 @@ ArrayRef<Value *> BuildVector = None, bool AllowReorder = false); - /// \brief Try to vectorize a chain that may start at the operands of \V; - bool tryToVectorize(BinaryOperator *V, slpvectorizer::BoUpSLP &R); + /// \brief Try to vectorize a chain that may start at the operands of \p I. + bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R); /// \brief Vectorize the store instructions collected in Stores. bool vectorizeStoreChains(slpvectorizer::BoUpSLP &R); @@ -98,6 +98,19 @@ slpvectorizer::BoUpSLP &R, TargetTransformInfo *TTI); + /// Try to vectorize trees that start at insertvalue instructions. + bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB, + slpvectorizer::BoUpSLP &R); + /// Try to vectorize trees that start at insertelement instructions. + bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB, + slpvectorizer::BoUpSLP &R); + /// Try to vectorize trees that start at compare instructions. + bool vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, slpvectorizer::BoUpSLP &R); + /// Tries to vectorize constructs started from CmpInst, InsertValueInst or + /// InsertElementInst instructions. + bool vectorizeSimpleInstructions(SmallVectorImpl<WeakVH> &Instructions, + BasicBlock *BB, slpvectorizer::BoUpSLP &R); + /// \brief Scan the basic block and look for patterns that are likely to start /// a vectorization chain. 
bool vectorizeChainsInBlock(BasicBlock *BB, slpvectorizer::BoUpSLP &R); Index: lib/Transforms/Vectorize/SLPVectorizer.cpp =================================================================== --- lib/Transforms/Vectorize/SLPVectorizer.cpp +++ lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -4149,15 +4149,18 @@ return Changed; } -bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) { - if (!V) +bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { + if (!I) + return false; + + if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) return false; - Value *P = V->getParent(); + Value *P = I->getParent(); // Vectorize in current basic block only. - auto *Op0 = dyn_cast<Instruction>(V->getOperand(0)); - auto *Op1 = dyn_cast<Instruction>(V->getOperand(1)); + auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); + auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) return false; @@ -4576,39 +4579,30 @@ /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 +/// starting from the last insertelement instruction. /// /// Returns true if it matches /// -static bool findBuildVector(InsertElementInst *FirstInsertElem, +static bool findBuildVector(InsertElementInst *LastInsertElem, SmallVectorImpl<Value *> &BuildVector, SmallVectorImpl<Value *> &BuildVectorOpds) { - if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) - return false; - - InsertElementInst *IE = FirstInsertElem; - while (true) { - BuildVector.push_back(IE); - BuildVectorOpds.push_back(IE->getOperand(1)); - - if (IE->use_empty()) - return false; - - InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); - if (!NextUse) - return true; - - // If this isn't the final use, make sure the next insertelement is the only - // use. 
It's OK if the final constructed vector is used multiple times - if (!IE->hasOneUse()) + Value *V = nullptr; + do { + BuildVector.push_back(LastInsertElem); + BuildVectorOpds.push_back(LastInsertElem->getOperand(1)); + V = LastInsertElem->getOperand(0); + if (isa<UndefValue>(V)) + break; + LastInsertElem = dyn_cast<InsertElementInst>(V); + if (!LastInsertElem || !LastInsertElem->hasOneUse()) return false; - - IE = NextUse; - } - - return false; + } while (true); + std::reverse(BuildVector.begin(), BuildVector.end()); + std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); + return true; } -/// \brief Like findBuildVector, but looks backwards for construction of aggregate. +/// \brief Like findBuildVector, but looks for construction of aggregate. /// /// \return true if it matches. static bool findBuildAggregate(InsertValueInst *IV, @@ -4735,10 +4729,10 @@ /// if it can be done. /// \returns true if a horizontal reduction was matched and reduced. /// \returns false if a horizontal reduction was not matched. -static bool canBeVectorized( - PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, - TargetTransformInfo *TTI, - const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) { +static bool +canBeVectorized(PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, + TargetTransformInfo *TTI, + const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { if (!ShouldVectorizeHor) return false; @@ -4783,7 +4777,7 @@ } } P = nullptr; - if (Vectorize(dyn_cast<BinaryOperator>(Inst), R)) { + if (Vectorize(Inst, R)) { Res = true; continue; } @@ -4813,10 +4807,68 @@ if (!isa<BinaryOperator>(I)) P = nullptr; // Try to match and vectorize a horizontal reduction. 
- return canBeVectorized(P, I, BB, R, TTI, - [this](BinaryOperator *BI, BoUpSLP &R) -> bool { - return tryToVectorize(BI, R); - }); + auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { + return tryToVectorize(I, R); + }; + return canBeVectorized(P, I, BB, R, TTI, ExtraVectorization); +} + +bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, + BasicBlock *BB, BoUpSLP &R) { + const DataLayout &DL = BB->getModule()->getDataLayout(); + if (!R.canMapToVector(IVI->getType(), DL)) + return false; + + SmallVector<Value *, 16> BuildVector; + SmallVector<Value *, 16> BuildVectorOpds; + if (!findBuildAggregate(IVI, BuildVector, BuildVectorOpds)) + return false; + + DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); + return tryToVectorizeList(BuildVectorOpds, R, BuildVector, false); +} + +bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, + BasicBlock *BB, BoUpSLP &R) { + SmallVector<Value *, 16> BuildVector; + SmallVector<Value *, 16> BuildVectorOpds; + if (!findBuildVector(IEI, BuildVector, BuildVectorOpds)) + return false; + + // Vectorize starting with the build vector operands ignoring the BuildVector + // instructions for the purpose of scheduling and user extraction. 
+ return tryToVectorizeList(BuildVectorOpds, R, BuildVector); +} + +bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, + BoUpSLP &R) { + if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) + return true; + + bool OpsChanged = false; + for (int Idx = 0; Idx < 2; ++Idx) { + OpsChanged |= + vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI); + } + return OpsChanged; +} + +bool SLPVectorizerPass::vectorizeSimpleInstructions( + SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) { + bool OpsChanged = false; + for (auto &VH : reverse(Instructions)) { + auto *I = dyn_cast_or_null<Instruction>(VH); + if (!I) + continue; + if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) + OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); + else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) + OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); + else if (auto *CI = dyn_cast<CmpInst>(I)) + OpsChanged |= vectorizeCmpInst(CI, BB, R); + } + Instructions.clear(); + return OpsChanged; } bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { @@ -4872,10 +4924,21 @@ VisitedInstrs.clear(); + SmallVector<WeakVH, 8> PostProcessInstructions; + SmallDenseSet<Instruction *, 4> KeyNodes; for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) { // We may go through BB multiple times so skip the one we have checked. - if (!VisitedInstrs.insert(&*it).second) + if (!VisitedInstrs.insert(&*it).second) { + if (it->use_empty() && KeyNodes.count(&*it) > 0 && + vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) { + // We would like to start over since some instructions are deleted + // and the iterator may become invalid value. + Changed = true; + it = BB->begin(); + e = BB->end(); + } continue; + } if (isa<DbgInfoIntrinsic>(it)) continue; @@ -4897,97 +4960,39 @@ continue; } - if (ShouldStartVectorizeHorAtStore) { - if (StoreInst *SI = dyn_cast<StoreInst>(it)) { - // Try to match and vectorize a horizontal reduction. 
- if (vectorizeRootInstruction(nullptr, SI->getValueOperand(), BB, R, - TTI)) { - Changed = true; - it = BB->begin(); - e = BB->end(); - continue; + // Ran into an instruction without users, like terminator, or function call + // with ignored return value, store. Ignore unused instructions (basing on + // instruction type, except for CallInst and InvokeInst). + if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) || + isa<InvokeInst>(it))) { + KeyNodes.insert(&*it); + bool OpsChanged = false; + if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) { + for (auto *V : it->operand_values()) { + // Try to match and vectorize a horizontal reduction. + OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI); } } - } - - // Try to vectorize horizontal reductions feeding into a return. - if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) { - if (RI->getNumOperands() != 0) { - // Try to match and vectorize a horizontal reduction. - if (vectorizeRootInstruction(nullptr, RI->getOperand(0), BB, R, TTI)) { - Changed = true; - it = BB->begin(); - e = BB->end(); - continue; - } - } - } - - // Try to vectorize trees that start at compare instructions. - if (CmpInst *CI = dyn_cast<CmpInst>(it)) { - if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) { - Changed = true; + // Start vectorization of post-process list of instructions from the + // top-tree instructions to try to vectorize as many instructions as + // possible. + OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R); + if (OpsChanged) { // We would like to start over since some instructions are deleted // and the iterator may become invalid value. - it = BB->begin(); - e = BB->end(); - continue; - } - - for (int I = 0; I < 2; ++I) { - if (vectorizeRootInstruction(nullptr, CI->getOperand(I), BB, R, TTI)) { - Changed = true; - // We would like to start over since some instructions are deleted - // and the iterator may become invalid value. 
- it = BB->begin(); - e = BB->end(); - break; - } - } - continue; - } - - // Try to vectorize trees that start at insertelement instructions. - if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) { - SmallVector<Value *, 16> BuildVector; - SmallVector<Value *, 16> BuildVectorOpds; - if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds)) - continue; - - // Vectorize starting with the build vector operands ignoring the - // BuildVector instructions for the purpose of scheduling and user - // extraction. - if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) { Changed = true; it = BB->begin(); e = BB->end(); + continue; } - - continue; - } - // Try to vectorize trees that start at insertvalue instructions feeding into - // a store. - if (StoreInst *SI = dyn_cast<StoreInst>(it)) { - if (InsertValueInst *LastInsertValue = dyn_cast<InsertValueInst>(SI->getValueOperand())) { - const DataLayout &DL = BB->getModule()->getDataLayout(); - if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) { - SmallVector<Value *, 16> BuildVector; - SmallVector<Value *, 16> BuildVectorOpds; - if (!findBuildAggregate(LastInsertValue, BuildVector, BuildVectorOpds)) - continue; + if (isa<InsertElementInst>(it) || isa<CmpInst>(it) || + isa<InsertValueInst>(it)) PostProcessInstructions.push_back(&*it); - DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI << "\n"); - if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) { - Changed = true; - it = BB->begin(); - e = BB->end(); - } - continue; - } - } - } } + assert(PostProcessInstructions.empty()); return Changed; } Index: test/Transforms/SLPVectorizer/AArch64/gather-root.ll =================================================================== --- test/Transforms/SLPVectorizer/AArch64/gather-root.ll +++ test/Transforms/SLPVectorizer/AArch64/gather-root.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -slp-vectorizer -S | FileCheck %s --check-prefix=DEFAULT ; RUN: opt < %s -slp-schedule-budget=0 -slp-min-tree-size=0 -slp-threshold=-30 -slp-vectorizer -S | 
FileCheck %s --check-prefix=GATHER ; RUN: opt < %s -slp-schedule-budget=0 -slp-threshold=-30 -slp-vectorizer -S | FileCheck %s --check-prefix=MAX-COST @@ -20,31 +21,56 @@ ; DEFAULT: %tmp34 = add i32 %[[R6]], %tmp17 ; ; GATHER-LABEL: @PR28330( -; GATHER: %tmp17 = phi i32 [ %tmp34, %for.body ], [ 0, %entry ] -; GATHER: %tmp19 = select i1 %tmp1, i32 -720, i32 -80 -; GATHER: %tmp21 = select i1 %tmp3, i32 -720, i32 -80 -; GATHER: %tmp23 = select i1 %tmp5, i32 -720, i32 -80 -; GATHER: %tmp25 = select i1 %tmp7, i32 -720, i32 -80 -; GATHER: %tmp27 = select i1 %tmp9, i32 -720, i32 -80 -; GATHER: %tmp29 = select i1 %tmp11, i32 -720, i32 -80 -; GATHER: %tmp31 = select i1 %tmp13, i32 -720, i32 -80 -; GATHER: %tmp33 = select i1 %tmp15, i32 -720, i32 -80 -; GATHER: %[[I0:.+]] = insertelement <8 x i32> undef, i32 %tmp19, i32 0 -; GATHER: %[[I1:.+]] = insertelement <8 x i32> %[[I0]], i32 %tmp21, i32 1 -; GATHER: %[[I2:.+]] = insertelement <8 x i32> %[[I1]], i32 %tmp23, i32 2 -; GATHER: %[[I3:.+]] = insertelement <8 x i32> %[[I2]], i32 %tmp25, i32 3 -; GATHER: %[[I4:.+]] = insertelement <8 x i32> %[[I3]], i32 %tmp27, i32 4 -; GATHER: %[[I5:.+]] = insertelement <8 x i32> %[[I4]], i32 %tmp29, i32 5 -; GATHER: %[[I6:.+]] = insertelement <8 x i32> %[[I5]], i32 %tmp31, i32 6 -; GATHER: %[[I7:.+]] = insertelement <8 x i32> %[[I6]], i32 %tmp33, i32 7 -; GATHER: %[[R0:.+]] = shufflevector <8 x i32> %[[I7]], <8 x i32> undef, <8 x i32> -; GATHER: %[[R1:.+]] = add <8 x i32> %[[I7]], %[[R0]] -; GATHER: %[[R2:.+]] = shufflevector <8 x i32> %[[R1]], <8 x i32> undef, <8 x i32> -; GATHER: %[[R3:.+]] = add <8 x i32> %[[R1]], %[[R2]] -; GATHER: %[[R4:.+]] = shufflevector <8 x i32> %[[R3]], <8 x i32> undef, <8 x i32> -; GATHER: %[[R5:.+]] = add <8 x i32> %[[R3]], %[[R4]] -; GATHER: %[[R6:.+]] = extractelement <8 x i32> %[[R5]], i32 0 -; GATHER: %tmp34 = add i32 %[[R6]], %tmp17 +; GATHER: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* bitcast (i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 
1) to <2 x i8>*), align 1 +; GATHER-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> [[TMP0]], zeroinitializer +; GATHER-NEXT: [[TMP4:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 3), align 1 +; GATHER-NEXT: [[TMP5:%.*]] = icmp eq i8 [[TMP4]], 0 +; GATHER-NEXT: [[TMP6:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 4), align 4 +; GATHER-NEXT: [[TMP7:%.*]] = icmp eq i8 [[TMP6]], 0 +; GATHER-NEXT: [[TMP8:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 5), align 1 +; GATHER-NEXT: [[TMP9:%.*]] = icmp eq i8 [[TMP8]], 0 +; GATHER-NEXT: [[TMP10:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 6), align 2 +; GATHER-NEXT: [[TMP11:%.*]] = icmp eq i8 [[TMP10]], 0 +; GATHER-NEXT: [[TMP12:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 7), align 1 +; GATHER-NEXT: [[TMP13:%.*]] = icmp eq i8 [[TMP12]], 0 +; GATHER-NEXT: [[TMP14:%.*]] = load i8, i8* getelementptr inbounds ([80 x i8], [80 x i8]* @a, i64 0, i64 8), align 8 +; GATHER-NEXT: [[TMP15:%.*]] = icmp eq i8 [[TMP14]], 0 +; GATHER-NEXT: br label [[FOR_BODY:%.*]] +; GATHER: for.body: +; GATHER-NEXT: [[TMP17:%.*]] = phi i32 [ [[TMP34:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] +; GATHER-NEXT: [[TMP2:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> , <2 x i32> +; GATHER-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 +; GATHER-NEXT: [[TMP20:%.*]] = add i32 [[TMP17]], [[TMP3]] +; GATHER-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1 +; GATHER-NEXT: [[TMP22:%.*]] = add i32 [[TMP20]], [[TMP4]] +; GATHER-NEXT: [[TMP23:%.*]] = select i1 [[TMP5]], i32 -720, i32 -80 +; GATHER-NEXT: [[TMP24:%.*]] = add i32 [[TMP22]], [[TMP23]] +; GATHER-NEXT: [[TMP25:%.*]] = select i1 [[TMP7]], i32 -720, i32 -80 +; GATHER-NEXT: [[TMP26:%.*]] = add i32 [[TMP24]], [[TMP25]] +; GATHER-NEXT: [[TMP27:%.*]] = select i1 [[TMP9]], i32 -720, i32 -80 +; GATHER-NEXT: [[TMP28:%.*]] = 
add i32 [[TMP26]], [[TMP27]] +; GATHER-NEXT: [[TMP29:%.*]] = select i1 [[TMP11]], i32 -720, i32 -80 +; GATHER-NEXT: [[TMP30:%.*]] = add i32 [[TMP28]], [[TMP29]] +; GATHER-NEXT: [[TMP31:%.*]] = select i1 [[TMP13]], i32 -720, i32 -80 +; GATHER-NEXT: [[TMP32:%.*]] = add i32 [[TMP30]], [[TMP31]] +; GATHER-NEXT: [[TMP33:%.*]] = select i1 [[TMP15]], i32 -720, i32 -80 +; GATHER-NEXT: [[TMP5:%.*]] = insertelement <8 x i32> undef, i32 [[TMP3]], i32 0 +; GATHER-NEXT: [[TMP6:%.*]] = insertelement <8 x i32> [[TMP5]], i32 [[TMP4]], i32 1 +; GATHER-NEXT: [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 [[TMP23]], i32 2 +; GATHER-NEXT: [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 [[TMP25]], i32 3 +; GATHER-NEXT: [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 [[TMP27]], i32 4 +; GATHER-NEXT: [[TMP10:%.*]] = insertelement <8 x i32> [[TMP9]], i32 [[TMP29]], i32 5 +; GATHER-NEXT: [[TMP11:%.*]] = insertelement <8 x i32> [[TMP10]], i32 [[TMP31]], i32 6 +; GATHER-NEXT: [[TMP12:%.*]] = insertelement <8 x i32> [[TMP11]], i32 [[TMP33]], i32 7 +; GATHER-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP12]], <8 x i32> undef, <8 x i32> +; GATHER-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP12]], [[RDX_SHUF]] +; GATHER-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> +; GATHER-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]] +; GATHER-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> +; GATHER-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]] +; GATHER-NEXT: [[TMP13:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0 +; GATHER-NEXT: [[TMP34]] = add i32 [[TMP13]], [[TMP17]] +; GATHER-NEXT: br label [[FOR_BODY]] ; ; MAX-COST-LABEL: @PR28330( ; MAX-COST-NOT: shufflevector Index: test/Transforms/SLPVectorizer/X86/horizontal.ll =================================================================== --- test/Transforms/SLPVectorizer/X86/horizontal.ll +++ 
test/Transforms/SLPVectorizer/X86/horizontal.ll @@ -817,22 +817,22 @@ define void @i32_red_call(i32 %val) { ; CHECK-LABEL: @i32_red_call( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8 -; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP2]], [[ADD]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4 -; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP3]], [[ADD_1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16 -; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP4]], [[ADD_2]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 5), align 4 -; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP5]], [[ADD_3]] -; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8 -; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP6]], [[ADD_4]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4 -; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP7]], [[ADD_5]] -; CHECK-NEXT: [[RES:%.*]] = call i32 @foobar(i32 [[ADD_6]]) +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr_i32 to <8 x i32>*), align 16 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 undef, undef +; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 undef, [[ADD]] +; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 undef, [[ADD_1]] +; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 undef, 
[[ADD_2]] +; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 undef, [[ADD_3]] +; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 undef, [[ADD_4]] +; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP0]], <8 x i32> undef, <8 x i32> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP0]], [[RDX_SHUF]] +; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> +; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]] +; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> +; CHECK-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]] +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0 +; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 undef, [[ADD_5]] +; CHECK-NEXT: [[RES:%.*]] = call i32 @foobar(i32 [[TMP1]]) ; CHECK-NEXT: ret void ; entry: @@ -858,22 +858,22 @@ define void @i32_red_invoke(i32 %val) personality i32 (...)* @__gxx_personality_v0 { ; CHECK-LABEL: @i32_red_invoke( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8 -; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP2]], [[ADD]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 3), align 4 -; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 [[TMP3]], [[ADD_1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 4), align 16 -; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 [[TMP4]], [[ADD_2]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x 
i32]* @arr_i32, i64 0, i64 5), align 4 -; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 [[TMP5]], [[ADD_3]] -; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 6), align 8 -; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 [[TMP6]], [[ADD_4]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 7), align 4 -; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 [[TMP7]], [[ADD_5]] -; CHECK-NEXT: [[RES:%.*]] = invoke i32 @foobar(i32 [[ADD_6]]) +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr_i32 to <8 x i32>*), align 16 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 undef, undef +; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 undef, [[ADD]] +; CHECK-NEXT: [[ADD_2:%.*]] = add nsw i32 undef, [[ADD_1]] +; CHECK-NEXT: [[ADD_3:%.*]] = add nsw i32 undef, [[ADD_2]] +; CHECK-NEXT: [[ADD_4:%.*]] = add nsw i32 undef, [[ADD_3]] +; CHECK-NEXT: [[ADD_5:%.*]] = add nsw i32 undef, [[ADD_4]] +; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP0]], <8 x i32> undef, <8 x i32> +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP0]], [[RDX_SHUF]] +; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> +; CHECK-NEXT: [[BIN_RDX2:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]] +; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> +; CHECK-NEXT: [[BIN_RDX4:%.*]] = add <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]] +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0 +; CHECK-NEXT: [[ADD_6:%.*]] = add nsw i32 undef, [[ADD_5]] +; CHECK-NEXT: [[RES:%.*]] = invoke i32 @foobar(i32 [[TMP1]]) ; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]] ; CHECK: exception: ; CHECK-NEXT: [[CLEANUP:%.*]] = landingpad i8 Index: test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll =================================================================== --- 
test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll +++ test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll @@ -303,24 +303,30 @@ ; CHECK-NEXT: [[B1:%.*]] = extractelement <4 x float> %b, i32 1 ; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x float> %b, i32 2 ; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x float> %b, i32 3 -; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i32 [[C0]], 0 -; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[C1]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 [[C2]], i32 0 -; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[C3]], i32 1 +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 [[C0]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 [[C1]], i32 1 ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer -; CHECK-NEXT: [[S0:%.*]] = select i1 [[CMP0]], float [[A0]], float [[B0]] -; CHECK-NEXT: [[S1:%.*]] = select i1 [[CMP1]], float [[A1]], float [[B1]] -; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x float> undef, float [[A2]], i32 0 -; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x float> [[TMP4]], float [[A3]], i32 1 -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> undef, float [[B2]], i32 0 -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x float> [[TMP6]], float [[B3]], i32 1 -; CHECK-NEXT: [[TMP8:%.*]] = select <2 x i1> [[TMP3]], <2 x float> [[TMP5]], <2 x float> [[TMP7]] -; CHECK-NEXT: [[RA:%.*]] = insertelement <4 x float> undef, float [[S0]], i32 0 -; CHECK-NEXT: [[RB:%.*]] = insertelement <4 x float> [[RA]], float [[S1]], i32 1 -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP8]], i32 0 -; CHECK-NEXT: [[RC:%.*]] = insertelement <4 x float> undef, float [[TMP9]], i32 2 -; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[TMP8]], i32 1 -; CHECK-NEXT: [[RD:%.*]] = insertelement <4 x float> [[RC]], float [[TMP10]], i32 3 +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x i32> undef, i32 [[C2]], i32 0 +; 
CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> [[TMP4]], i32 [[C3]], i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <2 x i32> [[TMP5]], zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x float> undef, float [[A0]], i32 0 +; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x float> [[TMP7]], float [[A1]], i32 1 +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x float> undef, float [[B0]], i32 0 +; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x float> [[TMP9]], float [[B1]], i32 1 +; CHECK-NEXT: [[TMP11:%.*]] = select <2 x i1> [[TMP3]], <2 x float> [[TMP8]], <2 x float> [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x float> undef, float [[A2]], i32 0 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x float> [[TMP12]], float [[A3]], i32 1 +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x float> undef, float [[B2]], i32 0 +; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x float> [[TMP14]], float [[B3]], i32 1 +; CHECK-NEXT: [[TMP16:%.*]] = select <2 x i1> [[TMP6]], <2 x float> [[TMP13]], <2 x float> [[TMP15]] +; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x float> [[TMP11]], i32 0 +; CHECK-NEXT: [[RA:%.*]] = insertelement <4 x float> undef, float [[TMP17]], i32 0 +; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x float> [[TMP11]], i32 1 +; CHECK-NEXT: [[RB:%.*]] = insertelement <4 x float> [[RA]], float [[TMP18]], i32 1 +; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x float> [[TMP16]], i32 0 +; CHECK-NEXT: [[RC:%.*]] = insertelement <4 x float> undef, float [[TMP19]], i32 2 +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x float> [[TMP16]], i32 1 +; CHECK-NEXT: [[RD:%.*]] = insertelement <4 x float> [[RC]], float [[TMP20]], i32 3 ; CHECK-NEXT: ret <4 x float> [[RD]] ; ; ZEROTHRESH-LABEL: @simple_select_no_users(