diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -321,21 +321,17 @@
   // scalars.
   FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
 
-  // Catch trivial redundancies
-  FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
-
-  // Hoisting of scalars and load expressions.
+  FPM.addPass(InstCombinePass());
   FPM.addPass(
       SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
-  FPM.addPass(InstCombinePass());
+
+  // Catch trivial redundancies
+  FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
 
   FPM.addPass(LibCallsShrinkWrapPass());
 
   invokePeepholeEPCallbacks(FPM, Level);
 
-  FPM.addPass(
-      SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
-
   // Form canonically associated expression trees, and simplify the trees using
   // basic mathematical properties. For example, this will form (nearly)
   // minimal multiplication trees.
@@ -474,6 +470,10 @@
   // scalars.
   FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
 
+  FPM.addPass(InstCombinePass());
+  FPM.addPass(
+      SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
+
   // Catch trivial redundancies
   FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
   if (EnableKnowledgeRetention)
@@ -497,9 +497,6 @@
   FPM.addPass(JumpThreadingPass());
   FPM.addPass(CorrelatedValuePropagationPass());
 
-  FPM.addPass(
-      SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
-
   FPM.addPass(InstCombinePass());
   if (Level == OptimizationLevel::O3)
     FPM.addPass(AggressiveInstCombinePass());
@@ -1017,18 +1014,6 @@
   // Optimize globals to try and fold them into constants.
   MPM.addPass(GlobalOptPass());
 
-  // Create a small function pass pipeline to cleanup after all the global
-  // optimizations.
-  FunctionPassManager GlobalCleanupPM;
-  // FIXME: Should this instead by a run of SROA?
-  GlobalCleanupPM.addPass(PromotePass());
-  GlobalCleanupPM.addPass(InstCombinePass());
-  invokePeepholeEPCallbacks(GlobalCleanupPM, Level);
-  GlobalCleanupPM.addPass(
-      SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
-  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(GlobalCleanupPM),
-                                                PTO.EagerlyInvalidateAnalyses));
-
   // Add all the requested passes for instrumentation PGO, if requested.
   if (PGOOpt && Phase != ThinOrFullLTOPhase::ThinLTOPostLink &&
       (PGOOpt->Action == PGOOptions::IRInstr ||
diff --git a/llvm/test/Transforms/Coroutines/coro-async.ll b/llvm/test/Transforms/Coroutines/coro-async.ll
--- a/llvm/test/Transforms/Coroutines/coro-async.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async.ll
@@ -130,7 +130,7 @@
 ; CHECK-LABEL: define swiftcc void @my_async_function(i8* swiftasync %async.ctxt, %async.task* %task, %async.actor* %actor)
 ; CHECK-O0-LABEL: define swiftcc void @my_async_function(i8* swiftasync %async.ctxt, %async.task* %task, %async.actor* %actor)
 ; CHECK-SAME: !dbg ![[SP1:[0-9]+]] {
-; CHECK: coro.return:
+; CHECK: entry:
 ; CHECK: [[FRAMEPTR:%.*]] = getelementptr inbounds i8, i8* %async.ctxt, i64 128
 ; CHECK: [[ACTOR_SPILL_ADDR:%.*]] = getelementptr inbounds i8, i8* %async.ctxt, i64 152
 ; CHECK: [[CAST1:%.*]] = bitcast i8* [[ACTOR_SPILL_ADDR]] to %async.actor**
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll b/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll
@@ -7,7 +7,7 @@
 declare {ptr, ptr, i32} @prototype_f(ptr, i1)
 define {ptr, ptr, i32} @f(ptr %buffer, i32 %n, { i32 } %dummy) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = getelementptr inbounds [[F_FRAME:%.*]], ptr [[BUFFER:%.*]], i64 0, i32 1
 ; CHECK-NEXT: store i32 [[N:%.*]], ptr [[N_VAL_SPILL_ADDR]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = tail call ptr @allocate(i32 [[N]])
@@ -42,7 +42,7 @@
 declare {ptr, i32} @prototype_g(ptr, i1)
 define {ptr, i32} @g(ptr %buffer, i32 %n) {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: store i32 [[N:%.*]], ptr [[BUFFER:%.*]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
 ; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, i64 [[TMP0]], align 8
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll b/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll
@@ -6,7 +6,7 @@
 declare {i8*, i8*, i32} @prototype_f(i8*, i1)
 define {i8*, i8*, i32} @f(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = getelementptr inbounds i8, i8* [[BUFFER:%.*]], i64 8
 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[N_VAL_SPILL_ADDR]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[TMP0]], align 4
@@ -44,7 +44,7 @@
 declare {i8*, i32} @prototype_g(i8*, i1)
 define {i8*, i32} @g(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
@@ -81,7 +81,7 @@
 declare {i8*, i32} @prototype_h(i8*, i1)
 define {i8*, i32} @h(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @h(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_SPILL_ADDR]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @h.resume.0 to i8*), i32 undef }, i32 [[N]], 1
@@ -115,7 +115,7 @@
 declare {i8*, i32} @prototype_i(i8*)
 define {i8*, i32} @i(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @i(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*)* @i.resume.0 to i8*), i32 undef }, i32 [[N]], 1
@@ -148,7 +148,7 @@
 declare {i8*, i32} @prototype_j(i8*)
 define {i8*, i32} @j(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @j(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*)* @j.resume.0 to i8*), i32 undef }, i32 [[N]], 1
@@ -183,7 +183,7 @@
 declare i32 @getSize()
 define {i8*, i32} @k(i8* %buffer, i32 %n, i1 %cond) {
 ; CHECK-LABEL: @k(
-; CHECK-NEXT: PostSpill:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[SIZE:%.*]] = tail call i32 @getSize()
 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[ALLOCA_BLOCK:%.*]], label [[CORO_RETURN:%.*]]
 ; CHECK: coro.return:
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll b/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll
@@ -6,15 +6,15 @@
 define {i8*, i32} @f(i8* %buffer, i32* %array) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: PostSpill:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[ARRAY_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32**
 ; CHECK-NEXT: store i32* [[ARRAY:%.*]], i32** [[ARRAY_SPILL_ADDR]], align 8
 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAY]], align 4
 ; CHECK-NEXT: [[LOAD_POS:%.*]] = icmp sgt i32 [[LOAD]], 0
-; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.smax.i32(i32 [[LOAD]], i32 0)
-; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[LOAD_POS]], i8* bitcast (void (i8*, i1)* @f.resume.0 to i8*), i8* bitcast (void (i8*, i1)* @f.resume.1 to i8*)
-; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i8*, i32 } undef, i8* [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i32 } [[TMP2]], i32 [[TMP0]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[LOAD_POS]], i8* bitcast (void (i8*, i1)* @f.resume.0 to i8*), i8* bitcast (void (i8*, i1)* @f.resume.1 to i8*)
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[LOAD]], i32 0)
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i8*, i32 } undef, i8* [[TMP0]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i32 } [[TMP2]], i32 [[TMP1]], 1
 ; CHECK-NEXT: ret { i8*, i32 } [[TMP3]]
 ;
 entry:
@@ -48,15 +48,15 @@
 define void @test(i32* %array) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: [[DOTSUB:%.*]] = bitcast i32** [[TMP0]] to i8*
-; CHECK-NEXT: store i32* [[ARRAY:%.*]], i32** [[TMP0]], align 8
+; CHECK-NEXT: [[BUFFER1:%.*]] = alloca i32*, align 8
+; CHECK-NEXT: [[BUFFER1_SUB:%.*]] = bitcast i32** [[BUFFER1]] to i8*
+; CHECK-NEXT: store i32* [[ARRAY:%.*]], i32** [[BUFFER1]], align 8
 ; CHECK-NEXT: [[LOAD_I:%.*]] = load i32, i32* [[ARRAY]], align 4
 ; CHECK-NEXT: [[LOAD_POS_I:%.*]] = icmp sgt i32 [[LOAD_I]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[LOAD_I]], i32 0)
-; CHECK-NEXT: tail call void @print(i32 [[TMP1]])
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.smax.i32(i32 [[LOAD_I]], i32 0)
+; CHECK-NEXT: tail call void @print(i32 [[TMP0]])
 ; CHECK-NEXT: [[CONT_CAST:%.*]] = select i1 [[LOAD_POS_I]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
-; CHECK-NEXT: call void [[CONT_CAST]](i8* nonnull [[DOTSUB]], i1 zeroext false)
+; CHECK-NEXT: call void [[CONT_CAST]](i8* nonnull [[BUFFER1_SUB]], i1 zeroext false)
 ; CHECK-NEXT: ret void
 ;
 entry:
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll b/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll
@@ -5,7 +5,7 @@
 define ptr @f(ptr %buffer, i32 %n) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: store i32 [[N:%.*]], ptr [[BUFFER:%.*]], align 4
 ; CHECK-NEXT: tail call void @print(i32 [[N]])
 ; CHECK-NEXT: ret ptr @f.resume.0
@@ -33,9 +33,9 @@
 define i32 @main() {
 ; CHECK-LABEL: @main(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @print(i32 4)
-; CHECK-NEXT: call void @print(i32 5), !noalias !0
-; CHECK-NEXT: call void @print(i32 6), !noalias !3
+; CHECK-NEXT: tail call void @print(i32 4)
+; CHECK-NEXT: tail call void @print(i32 5), !noalias !0
+; CHECK-NEXT: tail call void @print(i32 6), !noalias !3
 ; CHECK-NEXT: ret i32 0
 ;
 entry:
@@ -50,7 +50,7 @@
 define hidden { ptr, ptr } @g(ptr %buffer, ptr %ptr) {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = tail call ptr @allocate(i32 8)
 ; CHECK-NEXT: store ptr [[TMP0]], ptr [[BUFFER:%.*]], align 8
 ; CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[TMP0]], align 8
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
@@ -3,7 +3,7 @@
 define i8* @f(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = tail call i8* @allocate(i32 12)
 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BUFFER:%.*]] to i8**
 ; CHECK-NEXT: store i8* [[TMP0]], i8** [[TMP1]], align 8
@@ -55,8 +55,8 @@
 ; CHECK-NEXT: [[SUM7_I:%.*]] = add i32 [[N_VAL3_RELOAD12_I]], 2
 ; CHECK-NEXT: store i32 [[SUM7_I]], i32* [[TMP4]], align 4, !noalias !3
 ; CHECK-NEXT: store i32 4, i32* [[TMP3]], align 4, !noalias !3
-; CHECK-NEXT: [[SUM7_I7:%.*]] = add i32 [[N_VAL3_RELOAD12_I]], 6
-; CHECK-NEXT: tail call void @print(i32 [[SUM7_I7]]), !noalias !6
+; CHECK-NEXT: [[SUM7_I8:%.*]] = add i32 [[N_VAL3_RELOAD12_I]], 6
+; CHECK-NEXT: tail call void @print(i32 [[SUM7_I8]]), !noalias !6
 ; CHECK-NEXT: tail call void @deallocate(i8* [[TMP0]]), !noalias !6
 ; CHECK-NEXT: ret i32 0
 ;
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-value.ll b/llvm/test/Transforms/Coroutines/coro-retcon-value.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon-value.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-value.ll
@@ -4,7 +4,7 @@
 define {ptr, i32} @f(ptr %buffer, i32 %n) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: store i32 [[N:%.*]], ptr [[BUFFER:%.*]], align 4
 ; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { ptr, i32 } { ptr @f.resume.0, i32 undef }, i32 [[N]], 1
 ; CHECK-NEXT: ret { ptr, i32 } [[TMP0]]
@@ -34,9 +34,9 @@
 define i32 @main() {
 ; CHECK-LABEL: @main(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @print(i32 4)
-; CHECK-NEXT: call void @print(i32 5)
-; CHECK-NEXT: call void @print(i32 6)
+; CHECK-NEXT: tail call void @print(i32 4)
+; CHECK-NEXT: tail call void @print(i32 5)
+; CHECK-NEXT: tail call void @print(i32 6)
 ; CHECK-NEXT: ret i32 0
 ;
 entry:
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon.ll b/llvm/test/Transforms/Coroutines/coro-retcon.ll
--- a/llvm/test/Transforms/Coroutines/coro-retcon.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon.ll
@@ -5,7 +5,7 @@
 define i8* @f(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
 ; CHECK-NEXT: tail call void @print(i32 [[N]])
@@ -83,15 +83,15 @@
 define hidden { i8*, i8* } @g(i8* %buffer, i16* %ptr) {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = tail call i8* @allocate(i32 8)
 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BUFFER:%.*]] to i8**
 ; CHECK-NEXT: store i8* [[TMP0]], i8** [[TMP1]], align 8
 ; CHECK-NEXT: [[PTR_SPILL_ADDR:%.*]] = bitcast i8* [[TMP0]] to i16**
 ; CHECK-NEXT: store i16* [[PTR:%.*]], i16** [[PTR_SPILL_ADDR]], align 8
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[PTR]] to i8*
-; CHECK-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i8* } { i8* bitcast ({ i8*, i8* } (i8*, i1)* @g.resume.0 to i8*), i8* undef }, i8* [[TMP2]], 1
-; CHECK-NEXT: ret { i8*, i8* } [[TMP3]]
+; CHECK-NEXT: [[PTR_RELOAD1_CAST:%.*]] = bitcast i16* [[PTR]] to i8*
+; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i8*, i8* } { i8* bitcast ({ i8*, i8* } (i8*, i1)* @g.resume.0 to i8*), i8* undef }, i8* [[PTR_RELOAD1_CAST]], 1
+; CHECK-NEXT: ret { i8*, i8* } [[TMP2]]
 ;
 ; CORO-LABEL: @g(
 ; CORO-NEXT: entry:
diff --git a/llvm/test/Transforms/Coroutines/coro-swifterror.ll b/llvm/test/Transforms/Coroutines/coro-swifterror.ll
--- a/llvm/test/Transforms/Coroutines/coro-swifterror.ll
+++ b/llvm/test/Transforms/Coroutines/coro-swifterror.ll
@@ -4,7 +4,7 @@
 define i8* @f(i8* %buffer, i32 %n, i8** swifterror %errorslot) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
 ; CHECK-NEXT: tail call void @print(i32 [[N]])
@@ -44,7 +44,7 @@
 define i8* @g(i8* %buffer, i32 %n) {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT: coro.return:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = alloca swifterror i8*, align 4
 ; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
 ; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr52289.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr52289.ll
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr52289.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr52289.ll
@@ -6,6 +6,18 @@
 define i32 @main(i32 %a) {
 ; CHECK-LABEL: @main(
 ; CHECK-NEXT: if.end:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[A:%.*]], 1
+; CHECK-NEXT: [[CONV:%.*]] = and i32 [[TMP0]], 8
+; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq i32 [[CONV]], 0
+; CHECK-NEXT: [[SHL_I:%.*]] = select i1 [[CMP_I_NOT]], i32 7, i32 0
+; CHECK-NEXT: [[COND_I:%.*]] = shl nuw nsw i32 [[CONV]], [[SHL_I]]
+; CHECK-NEXT: [[CONV4_I:%.*]] = zext i32 [[COND_I]] to i64
+; CHECK-NEXT: [[SEXT:%.*]] = shl i64 [[CONV4_I]], 56
+; CHECK-NEXT: [[CONV1:%.*]] = ashr exact i64 [[SEXT]], 56
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[CONV1]] to i32
+; CHECK-NEXT: [[SEXT1:%.*]] = mul i32 [[TMP1]], 1355350016
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[SEXT1]], 65536
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TOBOOL]])
 ; CHECK-NEXT: ret i32 0
 ;
   %inc = add nsw i32 %a, 1
diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
--- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
@@ -16,10 +16,13 @@
 define void @caller1(i1 %c, ptr align 1 %ptr) {
 ; ASSUMPTIONS-OFF-LABEL: @caller1(
-; ASSUMPTIONS-OFF-NEXT: br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]]
+; ASSUMPTIONS-OFF-NEXT: br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE1:%.*]]
+; ASSUMPTIONS-OFF: false1:
+; ASSUMPTIONS-OFF-NEXT: store volatile i64 1, ptr [[PTR:%.*]], align 4
+; ASSUMPTIONS-OFF-NEXT: br label [[COMMON_RET]]
 ; ASSUMPTIONS-OFF: common.ret:
-; ASSUMPTIONS-OFF-NEXT: [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ]
-; ASSUMPTIONS-OFF-NEXT: store volatile i64 0, ptr [[PTR:%.*]], align 8
+; ASSUMPTIONS-OFF-NEXT: [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE1]] ], [ 2, [[TMP0:%.*]] ]
+; ASSUMPTIONS-OFF-NEXT: store volatile i64 0, ptr [[PTR]], align 8
 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, ptr [[PTR]], align 4
 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, ptr [[PTR]], align 4
 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, ptr [[PTR]], align 4
@@ -27,15 +30,15 @@
 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, ptr [[PTR]], align 4
 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 [[DOTSINK]], ptr [[PTR]], align 4
 ; ASSUMPTIONS-OFF-NEXT: ret void
-; ASSUMPTIONS-OFF: false2:
-; ASSUMPTIONS-OFF-NEXT: store volatile i64 1, ptr [[PTR]], align 4
-; ASSUMPTIONS-OFF-NEXT: br label [[COMMON_RET]]
 ;
 ; ASSUMPTIONS-ON-LABEL: @caller1(
-; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]]
+; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE1:%.*]]
+; ASSUMPTIONS-ON: false1:
+; ASSUMPTIONS-ON-NEXT: store volatile i64 1, ptr [[PTR:%.*]], align 4
+; ASSUMPTIONS-ON-NEXT: br label [[COMMON_RET]]
 ; ASSUMPTIONS-ON: common.ret:
-; ASSUMPTIONS-ON-NEXT: [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ]
-; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[PTR:%.*]], i64 8) ]
+; ASSUMPTIONS-ON-NEXT: [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE1]] ], [ 2, [[TMP0:%.*]] ]
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[PTR]], i64 8) ]
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 0, ptr [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, ptr [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, ptr [[PTR]], align 8
@@ -44,9 +47,6 @@
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, ptr [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 [[DOTSINK]], ptr [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: ret void
-; ASSUMPTIONS-ON: false2:
-; ASSUMPTIONS-ON-NEXT: store volatile i64 1, ptr [[PTR]], align 4
-; ASSUMPTIONS-ON-NEXT: br label [[COMMON_RET]]
 ;
   br i1 %c, label %true1, label %false1