diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -347,12 +347,17 @@
   /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
   int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;

-  /// Returns true if the value V is uniform within the loop.
-  bool isUniform(Value *V) const;
+  /// Returns true if the value V is uniform within the loop. If \p VF is
+  /// specified, then this also checks for uniformity across lanes for this
+  /// specific vectorization factor.
+  bool isUniform(Value *V,
+                 std::optional<ElementCount> VF = std::nullopt) const;

   /// A uniform memory op is a load or store which accesses the same memory
-  /// location on all lanes.
-  bool isUniformMemOp(Instruction &I) const;
+  /// location on all lanes. If \p VF is specified, then this additionally
+  /// checks whether the access is uniform across lanes for this specific
+  /// vectorization factor.
+  bool isUniformMemOp(Instruction &I,
+                      std::optional<ElementCount> VF = std::nullopt) const;

   /// Returns the information that we collected about runtime memory check.
   const RuntimePointerChecking *getRuntimePointerChecking() const {
@@ -463,6 +468,11 @@
   void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                        SmallPtrSetImpl<Instruction *> &AllowedExit);

+  /// \returns true if \p V is uniform across all lanes for a specific \p VF.
+  /// This explores the operands recursively up to \p MaxDepth, and returns
+  /// true if all operands are indeed uniform across VF.
+  bool isUniformAcrossVF(Value *V, uint32_t VF, int MaxDepth = 6) const;
+
   /// The loop that we evaluate.
   Loop *TheLoop;

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -78,6 +78,11 @@
         "Scalable vectorization is available and favored when the "
         "cost is inconclusive.")));

+static cl::opt<bool> EnableUniformAcrossVF(
+    "enable-uniform-across-vf", cl::init(true), cl::Hidden,
+    cl::desc("Enables analysis that checks for uniforms across all lanes with "
+             "a specific VF."));
+
 /// Maximum vectorization interleave count.
 static const unsigned MaxInterleaveFactor = 16;

@@ -471,11 +476,73 @@
   return 0;
 }

-bool LoopVectorizationLegality::isUniform(Value *V) const {
-  return LAI->isUniform(V);
+bool LoopVectorizationLegality::isUniformAcrossVF(Value *V, uint32_t VF,
+                                                  int MaxDepth) const {
+  --MaxDepth;
+  if (MaxDepth == 0)
+    return false;
+  if (isa<Constant>(V) || isa<Argument>(V) || isa<GlobalValue>(V))
+    return true;
+  if (Instruction *I = dyn_cast<Instruction>(V)) {
+    Value *Op1;
+    Value *Op2;
+    if (match(V, m_BinOp(m_Value(Op1), m_Value(Op2)))) {
+      auto Opcode = cast<BinaryOperator>(V)->getOpcode();
+      if (Opcode == Instruction::UDiv && isInductionVariable(Op1)) {
+        // Return true if divisor/step is a constant and a multiple of VF.
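+        // For example, with an induction variable starting at 0 with unit
+        // step and VF=2, the lanes of one vector iteration hold indices
+        // {2k, 2k+1}; for `udiv i64 %iv, 2` both lanes compute
+        // (2k)/2 == (2k+1)/2 == k, so the result is uniform across the
+        // lanes. This holds whenever (Divisor / Step) is a multiple of VF.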
+        if (auto *Divisor = dyn_cast<ConstantInt>(Op2)) {
+          auto *VFC = ConstantInt::get(Type::getInt64Ty(V->getContext()), VF);
+          bool IsIdentity =
+              Opcode == Instruction::UDiv && Divisor->isOneValue();
+          ScalarEvolution &SE = *PSE.getSE();
+          auto *IV = TheLoop->getInductionVariable(SE);
+          assert(IV != nullptr && "Missing IV!");
+          std::optional<Loop::LoopBounds> LoopBounds =
+              Loop::LoopBounds::getBounds(*TheLoop, *IV, SE);
+          assert(LoopBounds && "Missing LoopBounds!");
+          ConstantInt *Step =
+              dyn_cast<ConstantInt>(LoopBounds->getStepValue());
+          // (Divisor / Step) % VF == 0
+          return Step != nullptr && !IsIdentity &&
+                 Divisor->getValue()
+                     .udiv(Step->getValue())
+                     .srem(VFC->getValue())
+                     .isZero();
+        }
+      }
+      return isUniformAcrossVF(Op1, VF, MaxDepth) &&
+             isUniformAcrossVF(Op2, VF, MaxDepth);
+    }
+    Value *Ptr;
+    if (match(V, m_Load(m_Value(Ptr))))
+      return isUniformAcrossVF(Ptr, VF, MaxDepth);
+    Value *Val;
+    if (match(V, m_Store(m_Value(Val), m_Value(Ptr))))
+      return isUniformAcrossVF(Ptr, VF, MaxDepth) &&
+             isUniformAcrossVF(Val, VF, MaxDepth);
+    if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
+      if (!isUniformAcrossVF(GEP->getPointerOperand(), VF, MaxDepth))
+        return false;
+      for (auto &IndexUse : GEP->indices())
+        if (!isUniformAcrossVF(IndexUse.get(), VF, MaxDepth))
+          return false;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool LoopVectorizationLegality::isUniform(
+    Value *V, std::optional<ElementCount> VF) const {
+  if (LAI->isUniform(V))
+    return true;
+  if (EnableUniformAcrossVF && VF && !VF->isScalable() &&
+      isUniformAcrossVF(V, VF->getFixedValue()))
+    return true;
+  return false;
 }

-bool LoopVectorizationLegality::isUniformMemOp(Instruction &I) const {
+bool LoopVectorizationLegality::isUniformMemOp(
+    Instruction &I, std::optional<ElementCount> VF) const {
   Value *Ptr = getLoadStorePointerOperand(&I);
   if (!Ptr)
     return false;
@@ -483,7 +550,7 @@
   // stores from being uniform.  The current lowering simply doesn't handle
   // it; in particular, the cost model distinguishes scatter/gather from
   // scalar w/predication, and we currently rely on the scalar path.
-  return isUniform(Ptr) && !blockNeedsPredication(I.getParent());
+  return isUniform(Ptr, VF) && !blockNeedsPredication(I.getParent());
 }

 bool LoopVectorizationLegality::canVectorizeOuterLoop() {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4712,15 +4712,20 @@

   // Return true if all lanes perform the same memory operation, and we can
   // thus chose to execute only one.
-  auto isUniformMemOpUse = [&](Instruction *I) {
-    if (!Legal->isUniformMemOp(*I))
+  auto isUniformMemOpUse = [&](Instruction *I, ElementCount VF) {
+    if (!Legal->isUniformMemOp(*I, VF))
       return false;
     if (isa<LoadInst>(I))
       // Loading the same address always produces the same result - at least
       // assuming aliasing and ordering which have already been checked.
       return true;
     // Storing the same value on every iteration.
-    return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
+    if (TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()))
+      return true;
+    // The value may not be invariant, but may be uniform within the VF.
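+    // For example, with VF=2 a store whose value operand is
+    // `udiv i64 %iv, 2` writes the same value from both lanes of a vector
+    // iteration, so it is enough to execute the store once per vector
+    // iteration.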
+    if (Legal->isUniform(cast<StoreInst>(I)->getValueOperand(), VF))
+      return true;
+    return false;
   };

   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
@@ -4728,7 +4733,7 @@
     assert(WideningDecision != CM_Unknown &&
            "Widening decision should be ready at this moment");

-    if (isUniformMemOpUse(I))
+    if (isUniformMemOpUse(I, VF))
       return true;

     return (WideningDecision == CM_Widen ||
@@ -4789,7 +4794,7 @@
       if (!Ptr)
         continue;

-      if (isUniformMemOpUse(&I))
+      if (isUniformMemOpUse(&I, VF))
         addToWorklistIfAllowed(&I);

       if (isVectorizedMemAccessUse(&I, Ptr)) {
@@ -6527,7 +6532,7 @@
 InstructionCost
 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                 ElementCount VF) {
-  assert(Legal->isUniformMemOp(*I));
+  assert(Legal->isUniformMemOp(*I, VF));

   Type *ValTy = getLoadStoreType(I);
   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
@@ -6903,7 +6908,7 @@
         if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
           NumPredStores++;

-        if (Legal->isUniformMemOp(I)) {
+        if (Legal->isUniformMemOp(I, VF)) {
           auto isLegalToScalarize = [&]() {
             if (!VF.isScalable())
               // Scalarization of fixed length vectors "just works".
diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_across_vf.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_across_vf.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/uniform_across_vf.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_across_vf.ll
@@ -73,25 +73,19 @@
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT:    [[TMP1:%.*]] = udiv <2 x i64> [[VEC_IND]], <i64 2, i64 2>
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[TMP3]], align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr [[TMP5]], align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> poison, i64 [[TMP6]], i32 0
-; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i64> [[TMP8]], i64 [[TMP7]], i32 1
-; CHECK-NEXT:    [[TMP10:%.*]] = add nsw <2 x i64> [[TMP9]],
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP0]]
-; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[TMP11]], i32 0
-; CHECK-NEXT:    store <2 x i64> [[TMP10]], ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = udiv i64 [[TMP0]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <2 x i64> [[BROADCAST_SPLAT]],
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[TMP6]], align 8
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i64 
[[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, 1000 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -279,21 +273,14 @@ ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = udiv <2 x i64> [[VEC_IND]], -; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i64> [[TMP0]], -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP0]], i32 0 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP0]], i32 1 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP4]] -; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0 -; CHECK-NEXT: store i64 [[TMP6]], ptr [[TMP3]], align 8 -; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 -; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP5]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP1]], 42 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP1]] +; CHECK-NEXT: store i64 [[TMP2]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, 1000 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll --- a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll @@ -307,10 +307,10 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; CHECK: vector.memcheck: -; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 4 -; CHECK-NEXT: [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4 -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[UGLYGEP1]] -; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[UGLYGEP]] +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 4 +; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4 +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: @@ -319,6 +319,9 @@ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !alias.scope !12 ; CHECK-NEXT: store i32 [[TMP0]], ptr [[B]], align 4, !alias.scope !15, !noalias !12 +; CHECK-NEXT: store i32 [[TMP0]], ptr [[B]], align 4, !alias.scope !15, !noalias !12 +; CHECK-NEXT: store i32 [[TMP0]], ptr [[B]], align 4, !alias.scope !15, !noalias !12 +; CHECK-NEXT: store i32 [[TMP0]], ptr [[B]], align 4, !alias.scope !15, !noalias !12 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] @@ -369,73 +372,75 @@ ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP50:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP51:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP37:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP38:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP39:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i64> [[VEC_IND]], +; CHECK-NEXT: [[STEP_ADD1:%.*]] = add <4 x i64> [[STEP_ADD]], +; CHECK-NEXT: [[STEP_ADD2:%.*]] = add <4 x i64> [[STEP_ADD1]], ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 -; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5 -; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6 -; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7 -; CHECK-NEXT: [[TMP8:%.*]] = udiv i64 [[TMP0]], 8 -; CHECK-NEXT: [[TMP9:%.*]] = udiv i64 [[TMP1]], 8 -; CHECK-NEXT: [[TMP10:%.*]] = udiv i64 [[TMP2]], 8 -; CHECK-NEXT: [[TMP11:%.*]] = udiv i64 [[TMP3]], 8 -; CHECK-NEXT: [[TMP12:%.*]] = udiv i64 [[TMP4]], 8 -; CHECK-NEXT: [[TMP13:%.*]] = udiv i64 [[TMP5]], 8 -; CHECK-NEXT: [[TMP14:%.*]] = udiv i64 [[TMP6]], 8 -; CHECK-NEXT: [[TMP15:%.*]] = udiv i64 [[TMP7]], 8 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE:%.*]], i64 [[TMP8]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP10]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP11]] -; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP13]] -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP14]] -; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP15]] -; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP16]], align 1 -; CHECK-NEXT: [[TMP25:%.*]] = load i8, ptr [[TMP17]], align 1 -; CHECK-NEXT: [[TMP26:%.*]] = load 
i8, ptr [[TMP18]], align 1 -; CHECK-NEXT: [[TMP27:%.*]] = load i8, ptr [[TMP19]], align 1 -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <4 x i8> poison, i8 [[TMP24]], i32 0 -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i8> [[TMP28]], i8 [[TMP25]], i32 1 -; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i8> [[TMP29]], i8 [[TMP26]], i32 2 -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i8> [[TMP30]], i8 [[TMP27]], i32 3 -; CHECK-NEXT: [[TMP32:%.*]] = load i8, ptr [[TMP20]], align 1 -; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP21]], align 1 -; CHECK-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP22]], align 1 -; CHECK-NEXT: [[TMP35:%.*]] = load i8, ptr [[TMP23]], align 1 -; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i8> poison, i8 [[TMP32]], i32 0 -; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i8> [[TMP36]], i8 [[TMP33]], i32 1 -; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i8> [[TMP37]], i8 [[TMP34]], i32 2 -; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i8> [[TMP38]], i8 [[TMP35]], i32 3 -; CHECK-NEXT: [[TMP40:%.*]] = urem <4 x i64> [[VEC_IND]], -; CHECK-NEXT: [[TMP41:%.*]] = urem <4 x i64> [[STEP_ADD]], -; CHECK-NEXT: [[TMP42:%.*]] = trunc <4 x i64> [[TMP40]] to <4 x i8> -; CHECK-NEXT: [[TMP43:%.*]] = trunc <4 x i64> [[TMP41]] to <4 x i8> -; CHECK-NEXT: [[TMP44:%.*]] = lshr <4 x i8> [[TMP31]], [[TMP42]] -; CHECK-NEXT: [[TMP45:%.*]] = lshr <4 x i8> [[TMP39]], [[TMP43]] -; CHECK-NEXT: [[TMP46:%.*]] = and <4 x i8> [[TMP44]], -; CHECK-NEXT: [[TMP47:%.*]] = and <4 x i8> [[TMP45]], -; CHECK-NEXT: [[TMP48:%.*]] = zext <4 x i8> [[TMP46]] to <4 x i32> -; CHECK-NEXT: [[TMP49:%.*]] = zext <4 x i8> [[TMP47]] to <4 x i32> -; CHECK-NEXT: [[TMP50]] = add <4 x i32> [[VEC_PHI]], [[TMP48]] -; CHECK-NEXT: [[TMP51]] = add <4 x i32> [[VEC_PHI2]], [[TMP49]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD]], -; CHECK-NEXT: [[TMP52:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 -; CHECK-NEXT: br i1 [[TMP52]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP4:%.*]] = udiv i64 [[TMP0]], 8 +; CHECK-NEXT: [[TMP5:%.*]] = udiv i64 [[TMP1]], 8 +; CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP2]], 8 +; CHECK-NEXT: [[TMP7:%.*]] = udiv i64 [[TMP3]], 8 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE:%.*]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP8]], align 1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i8> poison, i8 [[TMP12]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT]], <4 x i8> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[TMP9]], align 1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i8> poison, i8 [[TMP13]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT7]], <4 x i8> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP10]], align 1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <4 x i8> poison, i8 [[TMP14]], i64 0 +; 
CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT9]], <4 x i8> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[TMP11]], align 1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT11:%.*]] = insertelement <4 x i8> poison, i8 [[TMP15]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT12:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT11]], <4 x i8> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP16:%.*]] = urem <4 x i64> [[VEC_IND]], +; CHECK-NEXT: [[TMP17:%.*]] = urem <4 x i64> [[STEP_ADD]], +; CHECK-NEXT: [[TMP18:%.*]] = urem <4 x i64> [[STEP_ADD1]], +; CHECK-NEXT: [[TMP19:%.*]] = urem <4 x i64> [[STEP_ADD2]], +; CHECK-NEXT: [[TMP20:%.*]] = trunc <4 x i64> [[TMP16]] to <4 x i8> +; CHECK-NEXT: [[TMP21:%.*]] = trunc <4 x i64> [[TMP17]] to <4 x i8> +; CHECK-NEXT: [[TMP22:%.*]] = trunc <4 x i64> [[TMP18]] to <4 x i8> +; CHECK-NEXT: [[TMP23:%.*]] = trunc <4 x i64> [[TMP19]] to <4 x i8> +; CHECK-NEXT: [[TMP24:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT]], [[TMP20]] +; CHECK-NEXT: [[TMP25:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT8]], [[TMP21]] +; CHECK-NEXT: [[TMP26:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT10]], [[TMP22]] +; CHECK-NEXT: [[TMP27:%.*]] = lshr <4 x i8> [[BROADCAST_SPLAT12]], [[TMP23]] +; CHECK-NEXT: [[TMP28:%.*]] = and <4 x i8> [[TMP24]], +; CHECK-NEXT: [[TMP29:%.*]] = and <4 x i8> [[TMP25]], +; CHECK-NEXT: [[TMP30:%.*]] = and <4 x i8> [[TMP26]], +; CHECK-NEXT: [[TMP31:%.*]] = and <4 x i8> [[TMP27]], +; CHECK-NEXT: [[TMP32:%.*]] = zext <4 x i8> [[TMP28]] to <4 x i32> +; CHECK-NEXT: [[TMP33:%.*]] = zext <4 x i8> [[TMP29]] to <4 x i32> +; CHECK-NEXT: [[TMP34:%.*]] = zext <4 x i8> [[TMP30]] to <4 x i32> +; CHECK-NEXT: [[TMP35:%.*]] = zext <4 x i8> [[TMP31]] to <4 x i32> +; CHECK-NEXT: [[TMP36]] = add <4 x i32> [[VEC_PHI]], [[TMP32]] +; CHECK-NEXT: [[TMP37]] = add <4 x i32> [[VEC_PHI4]], [[TMP33]] +; CHECK-NEXT: [[TMP38]] = add <4 x i32> [[VEC_PHI5]], [[TMP34]] +; CHECK-NEXT: [[TMP39]] = add <4 x i32> [[VEC_PHI6]], [[TMP35]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD2]], +; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 +; CHECK-NEXT: br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP51]], [[TMP50]] -; CHECK-NEXT: [[TMP53:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP37]], [[TMP36]] +; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX13]] +; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, 4096 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP53]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] @@ -453,7 +458,7 @@ ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop 
[[LOOP20:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP53]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll --- a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll +++ b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll @@ -46,11 +46,12 @@ ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: store i32 0, ptr @f.e, align 1, !alias.scope !0, !noalias !3 +; CHECK-NEXT: [[TMP2:%.*]] = zext i1 false to i32 +; CHECK-NEXT: store i32 [[TMP2]], ptr @f.e, align 1, !alias.scope !0, !noalias !3 ; CHECK-NEXT: store i8 10, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 500 +; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 500, 500 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -64,7 +65,7 @@ ; CHECK-NEXT: store i8 10, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 500 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ;