diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -588,8 +588,9 @@
   static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                     DominatorTree *DT);

-  /// Returns true if the value V is uniform within the loop.
-  bool isUniform(Value *V) const;
+  /// Returns true if the value V is uniform within the loop. If \p VF is
+  /// provided, check if \p V is uniform across \p VF.
+  bool isUniform(Value *V, std::optional<ElementCount> VF = std::nullopt) const;

   uint64_t getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
   unsigned getNumStores() const { return NumStores; }
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -348,11 +348,12 @@
   int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;

   /// Returns true if the value V is uniform within the loop.
-  bool isUniform(Value *V) const;
+  bool isUniform(Value *V, std::optional<ElementCount> VF = std::nullopt) const;

   /// A uniform memory op is a load or store which accesses the same memory
   /// location on all lanes.
-  bool isUniformMemOp(Instruction &I) const;
+  bool isUniformMemOp(Instruction &I,
+                      std::optional<ElementCount> VF = std::nullopt) const;

   /// Returns the information that we collected about runtime memory check.
   const RuntimePointerChecking *getRuntimePointerChecking() const {
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2546,7 +2546,67 @@
   return *Report;
 }

-bool LoopAccessInfo::isUniform(Value *V) const {
+namespace {
+class SCEVAddRecRewriter : public SCEVRewriteVisitor<SCEVAddRecRewriter> {
+  /// Multiplier to be applied to the step of AddRecs in TheLoop.
+  unsigned StepMultiplier;
+
+  /// Offset to be added to the AddRecs in TheLoop.
+  unsigned Offset;
+
+  /// Loop for which to rewrite AddRecs.
+  Loop *TheLoop;
+
+  /// Are all sub-expressions analyzable w.r.t. uniformity?
+  bool CanAnalyze = true;
+
+public:
+  SCEVAddRecRewriter(ScalarEvolution &SE, unsigned StepMultiplier,
+                     unsigned Offset, Loop *TheLoop)
+      : SCEVRewriteVisitor(SE), StepMultiplier(StepMultiplier), Offset(Offset),
+        TheLoop(TheLoop) {}
+
+  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+    assert(Expr->getLoop() == TheLoop &&
+           "addrec outside of TheLoop must be invariant");
+    // Build a new AddRec by multiplying the step by StepMultiplier and adding
+    // Offset * Step to the resulting AddRec.
+    auto *Ty = Expr->getType();
+    auto *StepC = SE.getConstant(Ty, StepMultiplier);
+    auto *OffsetC = SE.getConstant(Ty, Offset);
+    return SE.getAddExpr(
+        SE.getAddRecExpr(Expr->getStart(),
+                         SE.getMulExpr(StepC, Expr->getStepRecurrence(SE)),
+                         Expr->getLoop(), SCEV::FlagAnyWrap),
+        SE.getMulExpr(OffsetC, Expr->getStepRecurrence(SE)));
+  }
+
+  const SCEV *visit(const SCEV *S) {
+    if (!CanAnalyze)
+      return S;
+    return SCEVRewriteVisitor::visit(S);
+  }
+
+  const SCEV *visitUnknown(const SCEVUnknown *S) {
+    if (SE.isLoopInvariant(S, TheLoop))
+      return S;
+    // The value could vary across iterations.
+    CanAnalyze = false;
+    return S;
+  }
+
+  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *S) {
+    // Could not analyze the expression.
+    CanAnalyze = false;
+    return S;
+  }
+
+  bool canAnalyze() const { return CanAnalyze; }
+};
+
+} // namespace
+
+bool LoopAccessInfo::isUniform(Value *V, std::optional<ElementCount> VF) const {
   auto *SE = PSE->getSE();
   // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
   // never considered uniform.
@@ -2554,7 +2614,43 @@
   // trivially loop-invariant FP values to be considered uniform.
   if (!SE->isSCEVable(V->getType()))
     return false;
-  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
+  const SCEV *S = SE->getSCEV(V);
+  if (SE->isLoopInvariant(S, TheLoop))
+    return true;
+  if (!VF || VF->isScalable())
+    return false;
+  if (auto *GEP = dyn_cast<GetElementPtrInst>(V))
+    return all_of(GEP->operands(),
+                  [&](Value *Op) { return isUniform(Op, VF); });
+
+  // Rewrite AddRecs in TheLoop to step by VF and create 2 expressions, one
+  // computing the value of the first and one for the last vector lane. Consider
+  // V uniform if the SCEV expressions are equal.
+  SCEVAddRecRewriter FirstLaneRewriter(*SE, VF->getKnownMinValue(), 0, TheLoop);
+  auto *FirstLaneExpr = FirstLaneRewriter.visit(S);
+  if (!FirstLaneRewriter.canAnalyze())
+    return false;
+
+  SCEVAddRecRewriter LastLaneRewriter(*SE, VF->getKnownMinValue(),
+                                      VF->getKnownMinValue() - 1, TheLoop);
+  auto *LastLaneExpr = LastLaneRewriter.visit(S);
+  bool IsUniform =
+      LastLaneRewriter.canAnalyze() && FirstLaneExpr == LastLaneExpr;
+#ifndef NDEBUG
+  if (IsUniform) {
+    // Make sure the expressions for all other lanes are also equal.
+    for (unsigned I = 1; I < VF->getKnownMinValue() - 1; ++I) {
+      SCEVAddRecRewriter IthLaneRewriter(*SE, VF->getKnownMinValue(), I,
+                                         TheLoop);
+      auto *IthLaneExpr = IthLaneRewriter.visit(S);
+      assert(
+          IthLaneRewriter.canAnalyze() &&
+          SE->isKnownPredicate(CmpInst::ICMP_EQ, FirstLaneExpr, IthLaneExpr) &&
+          "first and last lane are equal, but not all lanes in between");
+    }
+  }
+#endif
+  return IsUniform;
 }

 /// Find the operand of the GEP that should be checked for consecutive
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -471,11 +471,13 @@
   return 0;
 }

-bool LoopVectorizationLegality::isUniform(Value *V) const {
-  return LAI->isUniform(V);
+bool LoopVectorizationLegality::isUniform(
+    Value *V, std::optional<ElementCount> VF) const {
+  return LAI->isUniform(V, VF);
 }

-bool LoopVectorizationLegality::isUniformMemOp(Instruction &I) const {
+bool LoopVectorizationLegality::isUniformMemOp(
+    Instruction &I, std::optional<ElementCount> VF) const {
   Value *Ptr = getLoadStorePointerOperand(&I);
   if (!Ptr)
     return false;
@@ -483,7 +485,7 @@
   // stores from being uniform. The current lowering simply doesn't handle
   // it; in particular, the cost model distinguishes scatter/gather from
   // scalar w/predication, and we currently rely on the scalar path.
-  return isUniform(Ptr) && !blockNeedsPredication(I.getParent());
+  return isUniform(Ptr, VF) && !blockNeedsPredication(I.getParent());
 }

 bool LoopVectorizationLegality::canVectorizeOuterLoop() {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4725,7 +4725,7 @@
   // Return true if all lanes perform the same memory operation, and we can
   // thus chose to execute only one.
   auto isUniformMemOpUse = [&](Instruction *I) {
-    if (!Legal->isUniformMemOp(*I))
+    if (!Legal->isUniformMemOp(*I, VF))
       return false;
     if (isa<LoadInst>(I))
       // Loading the same address always produces the same result - at least
@@ -6549,7 +6549,7 @@
 InstructionCost
 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                 ElementCount VF) {
-  assert(Legal->isUniformMemOp(*I));
+  assert(Legal->isUniformMemOp(*I, VF));

   Type *ValTy = getLoadStoreType(I);
   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
@@ -6925,7 +6925,7 @@
       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
         NumPredStores++;

-      if (Legal->isUniformMemOp(I)) {
+      if (Legal->isUniformMemOp(I, VF)) {
         auto isLegalToScalarize = [&]() {
           if (!VF.isScalable())
             // Scalarization of fixed length vectors "just works".
diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
@@ -307,10 +307,10 @@
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK: vector.memcheck:
-; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 4
-; CHECK-NEXT: [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[UGLYGEP1]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[UGLYGEP]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 4
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
@@ -369,73 +369,75 @@
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP50:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP51:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP37:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP38:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP39:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i64> [[VEC_IND]],
+; CHECK-NEXT: [[STEP_ADD1:%.*]] = add <4 x i64> [[STEP_ADD]],
+; CHECK-NEXT: [[STEP_ADD2:%.*]] = add <4 x i64> [[STEP_ADD1]],
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7
-; CHECK-NEXT: [[TMP8:%.*]] = udiv i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = udiv i64 [[TMP1]], 8
-; CHECK-NEXT: [[TMP10:%.*]] = udiv i64 [[TMP2]], 8
-; CHECK-NEXT: [[TMP11:%.*]] = udiv i64 [[TMP3]], 8
-; CHECK-NEXT: [[TMP12:%.*]] = udiv i64 [[TMP4]], 8
-; CHECK-NEXT: [[TMP13:%.*]] = udiv i64 [[TMP5]], 8
-; CHECK-NEXT: [[TMP14:%.*]] = udiv i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP15:%.*]] = udiv i64 [[TMP7]], 8
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE:%.*]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP11]]
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP13]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP14]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP15]]
-; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP16]], align 1
-; CHECK-NEXT: [[TMP25:%.*]] = load i8, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP26:%.*]] = load i8, ptr [[TMP18]], align 1
-; CHECK-NEXT: [[TMP27:%.*]] = load i8, ptr [[TMP19]], align 1
-; CHECK-NEXT: [[TMP28:%.*]] = insertelement <4 x i8> poison, i8 [[TMP24]], i32 0
-; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i8> [[TMP28]], i8 [[TMP25]], i32 1
-; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i8> [[TMP29]], i8 [[TMP26]], i32 2
-; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i8> [[TMP30]], i8 [[TMP27]], i32 3
-; CHECK-NEXT: [[TMP32:%.*]] = load i8, ptr [[TMP20]], align 1
-; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP21]], align 1
-; CHECK-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP22]], align 1
-; CHECK-NEXT: [[TMP35:%.*]] = load i8, ptr [[TMP23]], align 1
-; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i8> poison, i8 [[TMP32]], i32 0
-; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i8> [[TMP36]], i8 [[TMP33]], i32 1
-; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i8> [[TMP37]], i8 [[TMP34]], i32 2
-; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i8> [[TMP38]], i8 [[TMP35]], i32 3
-; CHECK-NEXT: [[TMP40:%.*]] = urem <4 x i64> [[VEC_IND]],
-; CHECK-NEXT: [[TMP41:%.*]] = urem <4 x i64> [[STEP_ADD]],
-; CHECK-NEXT: [[TMP42:%.*]] = trunc <4 x i64> [[TMP40]] to <4 x i8>
-; CHECK-NEXT: [[TMP43:%.*]] = trunc <4 x i64> [[TMP41]] to <4 x i8>
-; CHECK-NEXT: [[TMP44:%.*]] = lshr <4 x i8> [[TMP31]], [[TMP42]]
-; CHECK-NEXT: [[TMP45:%.*]] = lshr <4 x i8> [[TMP39]], [[TMP43]]
-; CHECK-NEXT: [[TMP46:%.*]] = and <4 x i8> [[TMP44]],
-; CHECK-NEXT: [[TMP47:%.*]] = and <4 x i8> [[TMP45]],
-; CHECK-NEXT: [[TMP48:%.*]] = zext <4 x i8> [[TMP46]] to <4 x i32>
-; CHECK-NEXT: [[TMP49:%.*]] = zext <4 x i8> [[TMP47]] to <4 x i32>
-; CHECK-NEXT: [[TMP50]] = add <4 x i32> [[VEC_PHI]], [[TMP48]]
-; CHECK-NEXT: [[TMP51]] = add <4 x i32> [[VEC_PHI2]], [[TMP49]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD]],
-; CHECK-NEXT: [[TMP52:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
-; CHECK-NEXT: br i1 [[TMP52]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
+; CHECK-NEXT: [[TMP4:%.*]] = udiv i64 [[TMP0]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = udiv i64 [[TMP1]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP7:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP8]], align 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i8> poison, i8 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[TMP9]], align 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i8> poison, i8 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT7]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP10]], align 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <4 x i8> poison, i8 [[TMP14]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT9]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[TMP11]], align 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT11:%.*]] = insertelement <4 x i8> poison, i8 [[TMP15]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT12:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT11]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = urem <4 x i64> [[VEC_IND]],
+; CHECK-NEXT: [[TMP17:%.*]] = urem <4 x i64> [[STEP_ADD]],
+; CHECK-NEXT: [[TMP18:%.*]] = urem <4 x i64> [[STEP_ADD1]],
+; CHECK-NEXT: [[TMP19:%.*]] = urem <4 x i64> [[STEP_ADD2]],
+; CHECK-NEXT: [[TMP20:%.*]] = trunc <4 x i64> [[TMP16]] to <4 x i8>
+; CHECK-NEXT: [[TMP21:%.*]] = trunc <4 x i64> [[TMP17]] to <4 x i8>
+; CHECK-NEXT: [[TMP22:%.*]] = trunc <4 x i64> [[TMP18]] to <4 x i8>
+; CHECK-NEXT: [[TMP23:%.*]] = trunc <4 x i64> [[TMP19]] to <4 x i8>
+; CHECK-NEXT: [[TMP24:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT]], [[TMP20]]
+; CHECK-NEXT: [[TMP25:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT8]], [[TMP21]]
+; CHECK-NEXT: [[TMP26:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT10]], [[TMP22]]
+; CHECK-NEXT: [[TMP27:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT12]], [[TMP23]]
+; CHECK-NEXT: [[TMP28:%.*]] = and <4 x i8> [[TMP24]],
+; CHECK-NEXT: [[TMP29:%.*]] = and <4 x i8> [[TMP25]],
+; CHECK-NEXT: [[TMP30:%.*]] = and <4 x i8> [[TMP26]],
+; CHECK-NEXT: [[TMP31:%.*]] = and <4 x i8> [[TMP27]],
+; CHECK-NEXT: [[TMP32:%.*]] = zext <4 x i8> [[TMP28]] to <4 x i32>
+; CHECK-NEXT: [[TMP33:%.*]] = zext <4 x i8> [[TMP29]] to <4 x i32>
+; CHECK-NEXT: [[TMP34:%.*]] = zext <4 x i8> [[TMP30]] to <4 x i32>
+; CHECK-NEXT: [[TMP35:%.*]] = zext <4 x i8> [[TMP31]] to <4 x i32>
+; CHECK-NEXT: [[TMP36]] = add <4 x i32> [[VEC_PHI]], [[TMP32]]
+; CHECK-NEXT: [[TMP37]] = add <4 x i32> [[VEC_PHI4]], [[TMP33]]
+; CHECK-NEXT: [[TMP38]] = add <4 x i32> [[VEC_PHI5]], [[TMP34]]
+; CHECK-NEXT: [[TMP39]] = add <4 x i32> [[VEC_PHI6]], [[TMP35]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD2]],
+; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP51]], [[TMP50]]
-; CHECK-NEXT: [[TMP53:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP37]], [[TMP36]]
+; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]]
+; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX13]]
+; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]])
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, 4096
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP53]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: br label [[LOOP:%.*]]
 ; CHECK: loop:
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -453,7 +455,7 @@
 ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]]
 ; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP53]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
 ;
 entry:
diff --git a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll
--- a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll
@@ -64,7 +64,7 @@
 ; CHECK-NEXT: store i8 10, ptr [[TMP1]], align 1
 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 500
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
 ; CHECK: exit:
 ; CHECK-NEXT: ret void
 ;
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll
@@ -72,25 +72,19 @@
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = udiv <2 x i64> [[VEC_IND]],
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> poison, i64 [[TMP6]], i32 0
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i64> [[TMP8]], i64 [[TMP7]], i32 1
-; CHECK-NEXT: [[TMP10:%.*]] = add nsw <2 x i64> [[TMP9]],
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[TMP11]], i32 0
-; CHECK-NEXT: store <2 x i64> [[TMP10]], ptr [[TMP12]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = add nsw <2 x i64> [[BROADCAST_SPLAT]],
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
+; CHECK-NEXT: store <2 x i64> [[TMP4]], ptr [[TMP6]], align 8
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]],
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, 1000
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll
@@ -90,29 +90,22 @@
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND2:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT3:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = udiv <2 x i64> [[VEC_IND]],
-; CHECK-NEXT: [[TMP2:%.*]] = udiv <2 x i64> [[VEC_IND2]],
-; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i64> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = udiv i64 [[TMP1]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], [[TMP3]]
 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> poison, i64 [[TMP8]], i32 0
-; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x i64> [[TMP10]], i64 [[TMP9]], i32 1
-; CHECK-NEXT: [[TMP12:%.*]] = add nsw <2 x i64> [[TMP11]],
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[TMP13]], i32 0
-; CHECK-NEXT: store <2 x i64> [[TMP12]], ptr [[TMP14]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP6]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw <2 x i64> [[BROADCAST_SPLAT]],
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0
+; CHECK-NEXT: store <2 x i64> [[TMP7]], ptr [[TMP9]], align 8
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]],
-; CHECK-NEXT: [[VEC_IND_NEXT3]] = add <2 x i64> [[VEC_IND2]],
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, 1000
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
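
Note for reviewers: LoopAccessInfo::isUniform above decides uniformity across a fixed VF by rewriting every AddRec in TheLoop to step by VF, forming one SCEV expression for lane 0 and one for lane VF - 1 (step scaled by VF, plus the lane offset times the step), and treating V as uniform only if the two expressions fold to the same SCEV. The snippet below is a standalone C++ sketch, not part of the patch and not LLVM code: it merely evaluates the lane indices numerically for the pattern exercised by uniform_across_vf_induction1.ll (a load of A[iv / 2] vectorized with VF = 2) to illustrate the property the symbolic comparison proves. The helper name indexExpr and the constants are made up for the illustration.

// Standalone illustration (not part of the patch): for every vector
// iteration, lane 0 and lane VF - 1 of the index expression iv / 2 agree,
// which is what the first-lane/last-lane SCEV comparison establishes
// symbolically and what lets the vectorizer emit a scalar load + broadcast.
#include <cassert>
#include <cstdint>
#include <iostream>

// Index expression from uniform_across_vf_induction1.ll: A[iv / 2].
static uint64_t indexExpr(uint64_t IV) { return IV / 2; }

int main() {
  const uint64_t VF = 2;           // fixed vectorization factor
  const uint64_t TripCount = 1000; // trip count used by the test

  for (uint64_t Base = 0; Base + VF <= TripCount; Base += VF) {
    uint64_t FirstLane = indexExpr(Base);         // lane 0:      iv = Base
    uint64_t LastLane = indexExpr(Base + VF - 1); // lane VF - 1: iv = Base + VF - 1
    // All lanes of this vector iteration read the same element, so one
    // scalar load plus a broadcast (splat) replaces a gather.
    assert(FirstLane == LastLane && "index not uniform across VF");
  }
  std::cout << "A[iv / 2] is uniform across VF = " << VF << "\n";
  return 0;
}

For VF = 4 the same expression is not uniform (lanes 0..3 of the first iteration give indices 0, 0, 1, 1), which is why the query is parameterized by the concrete VF rather than answering for the loop as a whole.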