Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -505,6 +505,10 @@
   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
   void fixVectorizedLoop(VPTransformState &State, VPlan &Plan);
 
+  /// Replace the symbolic strides with constants in vector region.
+  void replaceSymbolicStrides(
+      VPlan &Plan, SmallDenseMap<VPBasicBlock *, BasicBlock *> &VPBB2IRBB);
+
   // Return true if any runtime check is added.
   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
 
@@ -3770,6 +3774,23 @@
                          VF.getKnownMinValue() * UF);
 }
 
+void InnerLoopVectorizer::replaceSymbolicStrides(
+    VPlan &Plan, SmallDenseMap<VPBasicBlock *, BasicBlock *> &VPBB2IRBB) {
+  if (!Legal->getLAI())
+    return;
+
+  for (auto [_, Stride] : Legal->getLAI()->getSymbolicStrides()) {
+    auto *ScevCT = cast<SCEVConstant>(PSE.getSCEV(Stride));
+    Value *CT = ConstantInt::get(Stride->getType(), ScevCT->getAPInt());
+    Plan.getVectorLoopRegion()->replaceAllUses(Stride, CT, VPBB2IRBB);
+    auto BB = LoopVectorPreHeader;
+    Stride->replaceUsesWithIf(CT, [BB](Use &U) {
+      auto *I = dyn_cast<Instruction>(U.getUser());
+      return I && I->getParent() == BB;
+    });
+  }
+}
+
 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
   // In order to support recurrences we need to be able to vectorize Phi nodes.
   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
@@ -7755,6 +7776,10 @@
   // predication, updating analyses.
   ILV.fixVectorizedLoop(State, BestVPlan);
 
+  // 4. Replace the symbolic strides with constants in vector region. This
+  // relies on the GVN pass, but it is run before LV pass. So, do it here.
+  ILV.replaceSymbolicStrides(BestVPlan, State.CFG.VPBB2IRBB);
+
   ILV.printDebugTracesAtEnd();
 }
 
Index: llvm/lib/Transforms/Vectorize/VPlan.h
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlan.h
+++ llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2162,6 +2162,9 @@
   /// this VPRegionBlock, thereby "executing" the VPlan.
   void execute(VPTransformState *State) override;
 
+  void replaceAllUses(Value *OldValue, Value *NewValue,
+                      SmallDenseMap<VPBasicBlock *, BasicBlock *> &VPBB2IRBB);
+
   void dropAllReferences(VPValue *NewValue) override;
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
Index: llvm/lib/Transforms/Vectorize/VPlan.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -513,6 +513,18 @@
     Block->dropAllReferences(NewValue);
 }
 
+void VPRegionBlock::replaceAllUses(
+    Value *OldValue, Value *NewValue,
+    SmallDenseMap<VPBasicBlock *, BasicBlock *> &VPBB2IRBB) {
+  for (VPBlockBase *Block : vp_depth_first_shallow(Entry)) {
+    BasicBlock *BB = VPBB2IRBB[cast<VPBasicBlock>(Block)];
+    OldValue->replaceUsesWithIf(NewValue, [BB](Use &U) {
+      auto *I = dyn_cast<Instruction>(U.getUser());
+      return I && I->getParent() == BB;
+    });
+  }
+}
+
 void VPRegionBlock::execute(VPTransformState *State) {
   ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
       RPOT(Entry);
Index: llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
+++ llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -341,7 +341,7 @@
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 [[N]])
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw i32 [[TMP0]], [[STRIDE]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw i32 [[TMP0]], 1
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 2
 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i32 [[TMP2]]
 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
Index: llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -233,7 +233,7 @@
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul nuw nsw i64 [[TMP5]], [[STRIDE]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul nuw nsw i64 [[TMP5]], 1
 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 0
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
@@ -297,12 +297,12 @@
 ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
+; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 1
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[STRIDE]]
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 0, [[STRIDE]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 0, 1
 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], [[TMP5]]
 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 0
@@ -419,7 +419,7 @@
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP10:%.*]] = mul nuw nsw i64 [[TMP9]], [[STRIDE]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw nsw i64 [[TMP9]], 1
 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP10]]
 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP11]], i32 0
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4
@@ -487,12 +487,12 @@
 ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
+; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 1
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[STRIDE]]
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 0, [[STRIDE]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 0, 1
 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], [[TMP5]]
 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 0
@@ -588,9 +588,9 @@
 ; STRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
 ; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
 ; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; STRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
+; STRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], 1
 ; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
-; STRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
+; STRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 1
 ; STRIDED-NEXT: [[IND_END3:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP6]]
 ; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
 ; STRIDED: vector.body:
@@ -600,20 +600,20 @@
 ; STRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; STRIDED-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
 ; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 1
-; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[STRIDE]], [[TMP9]]
+; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP9]]
 ; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP8]], 0
 ; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
 ; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
 ; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
 ; STRIDED-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT]], [[TMP12]]
-; STRIDED-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
+; STRIDED-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <vscale x 4 x i64> poison, i64 1, i64 0
 ; STRIDED-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT5]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
 ; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 4 x i64> [[TMP13]], [[DOTSPLAT6]]
 ; STRIDED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[VECTOR_GEP]]
 ; STRIDED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
 ; STRIDED-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
 ; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1
-; STRIDED-NEXT: [[TMP18:%.*]] = mul i64 [[STRIDE]], [[TMP17]]
+; STRIDED-NEXT: [[TMP18:%.*]] = mul i64 1, [[TMP17]]
 ; STRIDED-NEXT: [[TMP19:%.*]] = mul i64 [[TMP16]], 0
 ; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP19]], i64 0
 ; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
Index: llvm/test/Transforms/LoopVectorize/runtime-check-needed-but-empty.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/runtime-check-needed-but-empty.ll
+++ llvm/test/Transforms/LoopVectorize/runtime-check-needed-but-empty.ll
@@ -17,12 +17,12 @@
 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 0
 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[X]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 1
 ; CHECK-NEXT: [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP6]]
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 0
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP8]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = mul i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP9:%.*]] = mul i32 [[TMP1]], 1
 ; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP10]]
 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 0
Index: llvm/test/Transforms/LoopVectorize/version-mem-access.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/version-mem-access.ll
+++ llvm/test/Transforms/LoopVectorize/version-mem-access.ll
@@ -16,6 +16,7 @@
   %cmp13 = icmp eq i32 %N, 0
   br i1 %cmp13, label %for.end, label %for.body.preheader
 
+; CHECK: vector.scevcheck
 ; CHECK-DAG: icmp ne i64 %AStride, 1
 ; CHECK-DAG: icmp ne i32 %BStride, 1
 ; CHECK-DAG: icmp ne i64 %CStride, 1
@@ -24,7 +25,17 @@
 ; CHECK: br
 
 ; CHECK: vector.body
+; CHECK: mul i32 %{{.*}}, 1
 ; CHECK: load <2 x i32>
+; CHECK: mul nsw i64 %{{.*}}, 1
+; CHECK: load <2 x i32>
+; CHECK: mul nsw i64 %{{.*}}, 1
+; CHECK: store <2 x i32>
+
+; CHECK: for.body
+; CHECK-DAG: mul i32 %{{.*}}, %BStride
+; CHECK-DAG: mul nsw i64 %{{.*}}, %CStride
+; CHECK-DAG: mul nsw i64 %{{.*}}, %AStride
 
 for.body.preheader:
   br label %for.body
@@ -60,8 +71,16 @@
 ; PR18480
 ; CHECK-LABEL: fn1
+; CHECK: vector.scevcheck
+; CHECK: icmp ne i32 %[[conv:.*]], 1
+
+; CHECK: vector.body
+; CHECK: mul nsw i32 %{{.*}}, 1
 ; CHECK: load <2 x double>
 
+; CHECK: for.body
+; CHECK: mul nsw i32 %{{.*}}, %[[conv]]
+
 define void @fn1(ptr noalias %x, ptr noalias %c, double %a) {
 entry:
   %conv = fptosi double %a to i32