diff --git a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
--- a/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -104,6 +104,11 @@
   /// Try to vectorize a chain that may start at the operands of \p I.
   bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R);
 
+  /// Try to vectorize chains that may start at the operands of
+  /// instructions in \p Insts.
+  bool tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
+                      slpvectorizer::BoUpSLP &R);
+
   /// Vectorize the store instructions collected in Stores.
   bool vectorizeStoreChains(slpvectorizer::BoUpSLP &R);
 
@@ -130,14 +135,6 @@
                            slpvectorizer::BoUpSLP &R, TargetTransformInfo *TTI);
 
-  /// Try to vectorize trees that start at insertvalue instructions.
-  bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
-                               slpvectorizer::BoUpSLP &R);
-
-  /// Try to vectorize trees that start at insertelement instructions.
-  bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
-                                  slpvectorizer::BoUpSLP &R);
-
   /// Tries to vectorize constructs started from CmpInst, InsertValueInst or
   /// InsertElementInst instructions.
   bool vectorizeSimpleInstructions(InstSetVector &Instructions, BasicBlock *BB,
                                   slpvectorizer::BoUpSLP &R, bool AtTerminator);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -11559,6 +11559,8 @@
                                    SmallVectorImpl<Value *> &BuildVectorOpds,
                                    SmallVectorImpl<Value *> &InsertElts,
                                    unsigned OperandOffset) {
+  assert((isa<InsertValueInst, InsertElementInst>(LastInsertInst)) &&
+         "Expected insertelement or insertvalue instruction!");
   do {
     Value *InsertedOperand = LastInsertInst->getOperand(1);
     Optional<unsigned> OperandIndex =
@@ -11597,13 +11599,10 @@
                                TargetTransformInfo *TTI,
                                SmallVectorImpl<Value *> &BuildVectorOpds,
                                SmallVectorImpl<Value *> &InsertElts) {
-
-  assert((isa<InsertElementInst>(LastInsertInst) ||
-          isa<InsertValueInst>(LastInsertInst)) &&
-         "Expected insertelement or insertvalue instruction!");
-
   assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
          "Expected empty result vectors!");
+  if (!isa<InsertElementInst, InsertValueInst>(LastInsertInst))
+    return false;
 
   Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
   if (!AggregateSize)
@@ -11796,44 +11795,18 @@
                                                 TargetTransformInfo *TTI) {
   SmallVector<WeakTrackingVH> PostponedInsts;
   bool Res = vectorizeRootInstruction(P, V, BB, R, TTI, PostponedInsts);
-  // Try to vectorize binops where reductions were not found.
-  for (Value *Op : PostponedInsts)
-    if (auto *Inst = dyn_cast<Instruction>(Op))
-      if (!R.isDeleted(Inst))
-        Res |= tryToVectorize(Inst, R);
+  Res |= tryToVectorize(PostponedInsts, R);
   return Res;
 }
 
-bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
-                                                 BasicBlock *BB, BoUpSLP &R) {
-  const DataLayout &DL = BB->getModule()->getDataLayout();
-  if (!R.canMapToVector(IVI->getType(), DL))
-    return false;
-
-  SmallVector<Value *, 16> BuildVectorOpds;
-  SmallVector<Value *, 16> BuildVectorInsts;
-  if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
-    return false;
-
-  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
-  // Aggregate value is unlikely to be processed in vector register.
-  return tryToVectorizeList(BuildVectorOpds, R);
-}
-
-bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
-                                                   BasicBlock *BB, BoUpSLP &R) {
-  SmallVector<Value *, 16> BuildVectorInsts;
-  SmallVector<Value *, 16> BuildVectorOpds;
-  SmallVector<int> Mask;
-  if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
-      (llvm::all_of(
-           BuildVectorOpds,
-           [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
-       isFixedVectorShuffle(BuildVectorOpds, Mask)))
-    return false;
-
-  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
-  return tryToVectorizeList(BuildVectorInsts, R);
+bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
+                                       BoUpSLP &R) {
+  bool Res = false;
+  for (Value *V : Insts)
+    if (auto *Inst = dyn_cast<Instruction>(V))
+      if (!R.isDeleted(Inst))
+        Res |= tryToVectorize(Inst, R);
+  return Res;
 }
 
 template <typename T>
@@ -11969,16 +11942,44 @@
   for (auto *I : reverse(Instructions)) {
     if (R.isDeleted(I))
       continue;
-    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
-      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
-    } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
-      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
-    } else if (isa<CmpInst>(I)) {
+    if (isa<CmpInst>(I)) {
       PostponedCmps.push_back(I);
       continue;
     }
-    // Try to find reductions in buildvector sequnces.
-    OpsChanged |= vectorizeRootInstruction(nullptr, I, BB, R, TTI);
+    SmallVector<Value *> BuildVectorOpds;
+    SmallVector<Value *> BuildVectorInsts;
+    if (!findBuildAggregate(I, TTI, BuildVectorOpds, BuildVectorInsts))
+      continue;
+
+    // Try to find reductions in buildvector sequences.
+    SmallVector<WeakTrackingVH> PostponedInsts;
+    for (Value *Op : BuildVectorOpds)
+      OpsChanged |=
+          vectorizeRootInstruction(nullptr, Op, BB, R, TTI, PostponedInsts);
+
+    if (isa<InsertValueInst>(I)) {
+      const DataLayout &DL = BB->getModule()->getDataLayout();
+      if (!R.canMapToVector(I->getType(), DL))
+        continue;
+
+      LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *I << "\n");
+      // Aggregate value is unlikely to be processed in vector register.
+      OpsChanged |= tryToVectorizeList(BuildVectorOpds, R);
+
+    } else if (isa<InsertElementInst>(I)) {
+      SmallVector<int> Mask;
+      if (all_of(BuildVectorOpds,
+                 [](Value *V) {
+                   return isa<ExtractElementInst, UndefValue>(V);
+                 }) &&
+          isFixedVectorShuffle(BuildVectorOpds, Mask))
+        continue;
+
+      LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *I << "\n");
+      OpsChanged |= tryToVectorizeList(BuildVectorInsts, R);
+    }
+    // Try to vectorize postponed binops where reductions were not found.
+    OpsChanged |= tryToVectorize(PostponedInsts, R);
   }
   if (AtTerminator) {
     // Try to find reductions first.
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
@@ -10,102 +10,23 @@
 define void @test(double* nocapture readonly %arg, double* nocapture readonly %arg1, double* nocapture %arg2) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[GEP1_0:%.*]] = getelementptr inbounds double, double* [[ARG:%.*]], i64 1
-; CHECK-NEXT:    [[LD1_0:%.*]] = load double, double* [[GEP1_0]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x double*> poison, double* [[ARG:%.*]], i32 0
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x double*> [[TMP0]], <8 x double*> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, <8 x double*> [[SHUFFLE]], <8 x i64> <i64 1, i64 3, i64 5, i64 7, i64 9, i64 11, i64 13, i64 15>
 ; CHECK-NEXT:    [[GEP2_0:%.*]] = getelementptr inbounds double, double* [[ARG1:%.*]], i64 16
-; CHECK-NEXT:    [[GEP1_1:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 3
-; CHECK-NEXT:    [[LD1_1:%.*]] = load double, double* [[GEP1_1]], align 8
-; CHECK-NEXT:    [[GEP0_1:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 1
-; CHECK-NEXT:    [[GEP2_1:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 17
-; CHECK-NEXT:    [[GEP1_2:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 5
-; CHECK-NEXT:    [[LD1_2:%.*]] = load double, double* [[GEP1_2]], align 8
-; CHECK-NEXT:    [[GEP0_2:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 2
-; CHECK-NEXT:    [[GEP2_2:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 18
-; CHECK-NEXT:    [[GEP1_3:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 7
-; CHECK-NEXT:    [[LD1_3:%.*]] = load double, double* [[GEP1_3]], align 8
-; CHECK-NEXT:    [[GEP0_3:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 3
-; CHECK-NEXT:    [[GEP2_3:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 19
-; CHECK-NEXT:    [[GEP1_4:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 9
-; CHECK-NEXT:    [[LD1_4:%.*]] = load double, double* [[GEP1_4]], align 8
-; CHECK-NEXT:    [[GEP0_4:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 4
-; CHECK-NEXT:    [[GEP2_4:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 20
-; CHECK-NEXT:    [[GEP1_5:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 11
-; CHECK-NEXT:    [[LD1_5:%.*]] = load double, double* [[GEP1_5]], align 8
-; CHECK-NEXT:    [[GEP0_5:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 5
-; CHECK-NEXT:    [[GEP2_5:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 21
-; CHECK-NEXT:    [[GEP1_6:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 13
-; CHECK-NEXT:    [[LD1_6:%.*]] = load double, double* [[GEP1_6]], align 8
-; CHECK-NEXT:    [[GEP0_6:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 6
-; CHECK-NEXT:    [[GEP2_6:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 22
-; CHECK-NEXT:    [[GEP1_7:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 15
-; CHECK-NEXT:    [[LD1_7:%.*]] = load double, double* [[GEP1_7]], align 8
-; CHECK-NEXT:    [[GEP0_7:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 7
-; CHECK-NEXT:    [[GEP2_7:%.*]] = getelementptr inbounds double, double* [[ARG1]], i64 23
-; CHECK-NEXT:    [[LD0_0:%.*]] = load double, double* [[ARG1]], align 8
-; CHECK-NEXT:    [[LD2_0:%.*]] = load double, double* [[GEP2_0]], align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[LD0_0]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[LD2_0]], i32 1
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[LD1_0]], i32 0
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP2]], double [[LD1_0]], i32 1
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul fast <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    [[LD0_1:%.*]] = load double, double* [[GEP0_1]], align 8
-; CHECK-NEXT:    [[LD2_1:%.*]] = load double, double* [[GEP2_1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[LD0_1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[LD2_1]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x double> poison, double [[LD1_1]], i32 0
-; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[LD1_1]], i32 1
-; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <2 x double> [[TMP6]], [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <2 x double> [[TMP4]], [[TMP9]]
-; CHECK-NEXT:    [[LD0_2:%.*]] = load double, double* [[GEP0_2]], align 8
-; CHECK-NEXT:    [[LD2_2:%.*]] = load double, double* [[GEP2_2]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x double> poison, double [[LD0_2]], i32 0
-; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[LD2_2]], i32 1
-; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <2 x double> poison, double [[LD1_2]], i32 0
-; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <2 x double> [[TMP13]], double [[LD1_2]], i32 1
-; CHECK-NEXT:    [[TMP15:%.*]] = fmul fast <2 x double> [[TMP12]], [[TMP14]]
-; CHECK-NEXT:    [[TMP16:%.*]] = fadd fast <2 x double> [[TMP10]], [[TMP15]]
-; CHECK-NEXT:    [[LD0_3:%.*]] = load double, double* [[GEP0_3]], align 8
-; CHECK-NEXT:    [[LD2_3:%.*]] = load double, double* [[GEP2_3]], align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <2 x double> poison, double [[LD0_3]], i32 0
-; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <2 x double> [[TMP17]], double [[LD2_3]], i32 1
-; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <2 x double> poison, double [[LD1_3]], i32 0
-; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <2 x double> [[TMP19]], double [[LD1_3]], i32 1
-; CHECK-NEXT:    [[TMP21:%.*]] = fmul fast <2 x double> [[TMP18]], [[TMP20]]
-; CHECK-NEXT:    [[TMP22:%.*]] = fadd fast <2 x double> [[TMP16]], [[TMP21]]
-; CHECK-NEXT:    [[LD0_4:%.*]] = load double, double* [[GEP0_4]], align 8
-; CHECK-NEXT:    [[LD2_4:%.*]] = load double, double* [[GEP2_4]], align 8
-; CHECK-NEXT:    [[TMP23:%.*]] = insertelement <2 x double> poison, double [[LD0_4]], i32 0
-; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <2 x double> [[TMP23]], double [[LD2_4]], i32 1
-; CHECK-NEXT:    [[TMP25:%.*]] = insertelement <2 x double> poison, double [[LD1_4]], i32 0
-; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <2 x double> [[TMP25]], double [[LD1_4]], i32 1
-; CHECK-NEXT:    [[TMP27:%.*]] = fmul fast <2 x double> [[TMP24]], [[TMP26]]
-; CHECK-NEXT:    [[TMP28:%.*]] = fadd fast <2 x double> [[TMP22]], [[TMP27]]
-; CHECK-NEXT:    [[LD0_5:%.*]] = load double, double* [[GEP0_5]], align 8
-; CHECK-NEXT:    [[LD2_5:%.*]] = load double, double* [[GEP2_5]], align 8
-; CHECK-NEXT:    [[TMP29:%.*]] = insertelement <2 x double> poison, double [[LD0_5]], i32 0
-; CHECK-NEXT:    [[TMP30:%.*]] = insertelement <2 x double> [[TMP29]], double [[LD2_5]], i32 1
-; CHECK-NEXT:    [[TMP31:%.*]] = insertelement <2 x double> poison, double [[LD1_5]], i32 0
-; CHECK-NEXT:    [[TMP32:%.*]] = insertelement <2 x double> [[TMP31]], double [[LD1_5]], i32 1
-; CHECK-NEXT:    [[TMP33:%.*]] = fmul fast <2 x double> [[TMP30]], [[TMP32]]
-; CHECK-NEXT:    [[TMP34:%.*]] = fadd fast <2 x double> [[TMP28]], [[TMP33]]
-; CHECK-NEXT:    [[LD0_6:%.*]] = load double, double* [[GEP0_6]], align 8
-; CHECK-NEXT:    [[LD2_6:%.*]] = load double, double* [[GEP2_6]], align 8
-; CHECK-NEXT:    [[TMP35:%.*]] = insertelement <2 x double> poison, double [[LD0_6]], i32 0
-; CHECK-NEXT:    [[TMP36:%.*]] = insertelement <2 x double> [[TMP35]], double [[LD2_6]], i32 1
-; CHECK-NEXT:    [[TMP37:%.*]] = insertelement <2 x double> poison, double [[LD1_6]], i32 0
-; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <2 x double> [[TMP37]], double [[LD1_6]], i32 1
-; CHECK-NEXT:    [[TMP39:%.*]] = fmul fast <2 x double> [[TMP36]], [[TMP38]]
-; CHECK-NEXT:    [[TMP40:%.*]] = fadd fast <2 x double> [[TMP34]], [[TMP39]]
-; CHECK-NEXT:    [[LD0_7:%.*]] = load double, double* [[GEP0_7]], align 8
-; CHECK-NEXT:    [[LD2_7:%.*]] = load double, double* [[GEP2_7]], align 8
-; CHECK-NEXT:    [[TMP41:%.*]] = insertelement <2 x double> poison, double [[LD0_7]], i32 0
-; CHECK-NEXT:    [[TMP42:%.*]] = insertelement <2 x double> [[TMP41]], double [[LD2_7]], i32 1
-; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <2 x double> poison, double [[LD1_7]], i32 0
-; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <2 x double> [[TMP43]], double [[LD1_7]], i32 1
-; CHECK-NEXT:    [[TMP45:%.*]] = fmul fast <2 x double> [[TMP42]], [[TMP44]]
-; CHECK-NEXT:    [[TMP46:%.*]] = fadd fast <2 x double> [[TMP40]], [[TMP45]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> [[TMP1]], i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x double> undef)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[ARG1]] to <8 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x double>, <8 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <8 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP5]])
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast double* [[GEP2_0]] to <8 x double>*
+; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <8 x double> [[TMP8]], [[TMP2]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double -0.000000e+00, <8 x double> [[TMP9]])
+; CHECK-NEXT:    [[I142:%.*]] = insertelement <2 x double> poison, double [[TMP6]], i64 0
+; CHECK-NEXT:    [[I143:%.*]] = insertelement <2 x double> [[I142]], double [[TMP10]], i64 1
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds double, double* [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
-; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[TMP46]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> [[I143]], <2 x double*> [[P]], i32 8, <2 x i1> <i1 true, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
 entry: