Index: llvm/test/Transforms/EarlyCSE/commute.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/commute.ll
+++ llvm/test/Transforms/EarlyCSE/commute.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s
-; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
+; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s
+; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s

 define void @test1(float %A, float %B, float* %PA, float* %PB) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT: [[C:%.*]] = fadd float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store float [[C]], float* [[PA:%.*]], align 4
-; CHECK-NEXT: store float [[C]], float* [[PB:%.*]], align 4
+; CHECK-NEXT: store float [[C]], ptr [[PA:%.*]], align 4
+; CHECK-NEXT: store float [[C]], ptr [[PB:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 %C = fadd float %A, %B
@@ -19,8 +19,8 @@
 define void @test2(float %A, float %B, i1* %PA, i1* %PB) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT: [[C:%.*]] = fcmp oeq float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT: ret void
 ;
 %C = fcmp oeq float %A, %B
@@ -33,8 +33,8 @@
 define void @test3(float %A, float %B, i1* %PA, i1* %PB) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT: [[C:%.*]] = fcmp uge float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT: ret void
 ;
 %C = fcmp uge float %A, %B
@@ -47,8 +47,8 @@
 define void @test4(i32 %A, i32 %B, i1* %PA, i1* %PB) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT: ret void
 ;
 %C = icmp eq i32 %A, %B
@@ -61,8 +61,8 @@
 define void @test5(i32 %A, i32 %B, i1* %PA, i1* %PB) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT: ret void
 ;
 %C = icmp sgt i32 %A, %B
@@ -77,8 +77,8 @@
 define void @test6(float %f, i1* %p1, i1* %p2) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT: [[C1:%.*]] = fcmp ult float [[F:%.*]], [[F]]
-; CHECK-NEXT: store i1 [[C1]], i1* [[P1:%.*]], align 1
-; CHECK-NEXT: store i1 [[C1]], i1* [[P2:%.*]], align 1
+; CHECK-NEXT: store i1 [[C1]], ptr [[P1:%.*]], align 1
+; CHECK-NEXT: store i1 [[C1]], ptr [[P2:%.*]], align 1
 ; CHECK-NEXT: ret void
 ;
 %c1 = fcmp ult float %f, %f
@@ -748,14 +748,14 @@
 ; negation of each negation to check for the same issue one level deeper.
 define void @not_not_min(i32* %px, i32* %py, i32* %pout) {
 ; CHECK-LABEL: @not_not_min(
-; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[PX:%.*]], align 4
-; CHECK-NEXT: [[Y:%.*]] = load volatile i32, i32* [[PY:%.*]], align 4
+; CHECK-NEXT: [[X:%.*]] = load volatile i32, ptr [[PX:%.*]], align 4
+; CHECK-NEXT: [[Y:%.*]] = load volatile i32, ptr [[PY:%.*]], align 4
 ; CHECK-NEXT: [[CMPA:%.*]] = icmp slt i32 [[X]], [[Y]]
 ; CHECK-NEXT: [[CMPB:%.*]] = xor i1 [[CMPA]], true
 ; CHECK-NEXT: [[RA:%.*]] = select i1 [[CMPA]], i32 [[X]], i32 [[Y]]
-; CHECK-NEXT: store volatile i32 [[RA]], i32* [[POUT:%.*]], align 4
-; CHECK-NEXT: store volatile i32 [[RA]], i32* [[POUT]], align 4
-; CHECK-NEXT: store volatile i32 [[RA]], i32* [[POUT]], align 4
+; CHECK-NEXT: store volatile i32 [[RA]], ptr [[POUT:%.*]], align 4
+; CHECK-NEXT: store volatile i32 [[RA]], ptr [[POUT]], align 4
+; CHECK-NEXT: store volatile i32 [[RA]], ptr [[POUT]], align 4
 ; CHECK-NEXT: ret void
 ;
 %x = load volatile i32, i32* %px
Index: llvm/test/Transforms/EarlyCSE/flags.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/flags.ll
+++ llvm/test/Transforms/EarlyCSE/flags.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -early-cse -earlycse-debug-hash -S < %s | FileCheck %s
-; RUN: opt -basic-aa -early-cse-memssa -S < %s | FileCheck %s
+; RUN: opt -early-cse -earlycse-debug-hash -normalize-opaque-pointers -S < %s | FileCheck %s
+; RUN: opt -basic-aa -early-cse-memssa -normalize-opaque-pointers -S < %s | FileCheck %s

 declare void @use(i1)

@@ -22,9 +22,9 @@
 define void @test_inbounds_program_ub_if_first_gep_poison(i8* %ptr, i64 %n) {
 ; CHECK-LABEL: @test_inbounds_program_ub_if_first_gep_poison(
-; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT: call void @use.i8(i8* noundef [[ADD_PTR_1]])
-; CHECK-NEXT: call void @use.i8(i8* [[ADD_PTR_1]])
+; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT: call void @use.i8(ptr noundef [[ADD_PTR_1]])
+; CHECK-NEXT: call void @use.i8(ptr [[ADD_PTR_1]])
 ; CHECK-NEXT: ret void
 ;
 %add.ptr.1 = getelementptr inbounds i8, i8* %ptr, i64 %n
@@ -36,9 +36,9 @@
 define void @test_inbounds_program_not_ub_if_first_gep_poison(i8* %ptr, i64 %n) {
 ; CHECK-LABEL: @test_inbounds_program_not_ub_if_first_gep_poison(
-; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT: call void @use.i8(i8* [[ADD_PTR_1]])
-; CHECK-NEXT: call void @use.i8(i8* [[ADD_PTR_1]])
+; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT: call void @use.i8(ptr [[ADD_PTR_1]])
+; CHECK-NEXT: call void @use.i8(ptr [[ADD_PTR_1]])
 ; CHECK-NEXT: ret void
 ;
 %add.ptr.1 = getelementptr inbounds i8, i8* %ptr, i64 %n
Index: llvm/test/Transforms/EarlyCSE/guards.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/guards.ll
+++ llvm/test/Transforms/EarlyCSE/guards.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
-; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
-; RUN: opt < %s -S -basic-aa -early-cse-memssa --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME
+; RUN: opt -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
+; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
+; RUN: opt < %s -S -basic-aa -early-cse-memssa --enable-knowledge-retention -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,USE_ASSUME

 declare void @llvm.experimental.guard(i1,...)

@@ -11,14 +11,14 @@
 ; We can do store to load forwarding over a guard, since it does not
 ; clobber memory
 ; NO_ASSUME-LABEL: @test0(
-; NO_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT: store i32 40, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
 ; NO_ASSUME-NEXT: ret i32 40
 ;
 ; USE_ASSUME-LABEL: @test0(
-; USE_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT: store i32 40, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 40
 ;
@@ -31,14 +31,14 @@
 define i32 @test1(i32* %val, i1 %cond) {
 ; We can CSE loads over a guard, since it does not clobber memory
 ; NO_ASSUME-LABEL: @test1(
-; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
+; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
 ; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: @test1(
-; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
+; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]), "align"(i32* [[VAL]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[VAL]], i64 4), "nonnull"(ptr [[VAL]]), "align"(ptr [[VAL]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
@@ -185,9 +185,9 @@
 ; Guard intrinsics do _read_ memory, so th call to guard below needs
 ; to see the store of 500 to %ptr
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT: store i32 500, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT: store i32 500, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[C:%.*]]) [ "deopt"() ]
-; CHECK-NEXT: store i32 600, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 600, ptr [[PTR]], align 4
 ; CHECK-NEXT: ret void
 ;
@@ -219,17 +219,17 @@
 ; block in case when the condition is not recalculated.
 ; NO_ASSUME-LABEL: @test08(
 ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @test08(
 ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: ret void
 ;
@@ -251,15 +251,15 @@
 ; NO_ASSUME-LABEL: @test09(
 ; NO_ASSUME-NEXT: entry:
 ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; NO_ASSUME: if.true:
-; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT: br label [[MERGE:%.*]]
 ; NO_ASSUME: if.false:
-; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT: br label [[MERGE]]
 ; NO_ASSUME: merge:
 ; NO_ASSUME-NEXT: ret void
@@ -267,16 +267,16 @@
 ; USE_ASSUME-LABEL: @test09(
 ; USE_ASSUME-NEXT: entry:
 ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; USE_ASSUME: if.true:
-; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: br label [[MERGE:%.*]]
 ; USE_ASSUME: if.false:
-; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: br label [[MERGE]]
 ; USE_ASSUME: merge:
 ; USE_ASSUME-NEXT: ret void
@@ -315,15 +315,15 @@
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK: if.true:
 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT: store i32 100, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT: br label [[MERGE:%.*]]
 ; CHECK: if.false:
-; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 200, ptr [[PTR]], align 4
 ; CHECK-NEXT: br label [[MERGE]]
 ; CHECK: merge:
-; CHECK-NEXT: store i32 300, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 300, ptr [[PTR]], align 4
 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; CHECK-NEXT: store i32 400, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 400, ptr [[PTR]], align 4
 ; CHECK-NEXT: ret void
 ;
@@ -401,14 +401,14 @@
 ; NO_ASSUME-LABEL: @test13(
 ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
-; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT: store i32 400, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @test13(
 ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR:%.*]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: ret void
 ;
@@ -432,13 +432,13 @@
 ; NO_ASSUME-NEXT: entry:
 ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
-; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT: store i32 400, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; NO_ASSUME: if.true:
-; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT: br label [[MERGE:%.*]]
 ; NO_ASSUME: if.false:
-; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT: br label [[MERGE]]
 ; NO_ASSUME: merge:
 ; NO_ASSUME-NEXT: ret void
@@ -447,14 +447,14 @@
 ; USE_ASSUME-NEXT: entry:
 ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR:%.*]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; USE_ASSUME: if.true:
-; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: br label [[MERGE:%.*]]
 ; USE_ASSUME: if.false:
-; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT: br label [[MERGE]]
 ; USE_ASSUME: merge:
 ; USE_ASSUME-NEXT: ret void
@@ -494,15 +494,15 @@
 ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK: if.true:
 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT: store i32 100, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT: br label [[MERGE:%.*]]
 ; CHECK: if.false:
-; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 200, ptr [[PTR]], align 4
 ; CHECK-NEXT: br label [[MERGE]]
 ; CHECK: merge:
-; CHECK-NEXT: store i32 300, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 300, ptr [[PTR]], align 4
 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; CHECK-NEXT: store i32 400, i32* [[PTR]], align 4
+; CHECK-NEXT: store i32 400, ptr [[PTR]], align 4
 ; CHECK-NEXT: ret void
 ;
Index: llvm/test/Transforms/EarlyCSE/invariant-loads.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/invariant-loads.ll
+++ llvm/test/Transforms/EarlyCSE/invariant-loads.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
-; RUN: opt -S -basic-aa -early-cse-memssa < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
-; RUN: opt -S -basic-aa -early-cse-memssa --enable-knowledge-retention < %s | FileCheck %s --check-prefixes=CHECK,USE_ASSUME
+; RUN: opt -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
+; RUN: opt -S -basic-aa -early-cse-memssa -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
+; RUN: opt -S -basic-aa -early-cse-memssa --enable-knowledge-retention -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,USE_ASSUME

 declare void @clobber_and_use(i32)

 define void @f_0(i32* %ptr) {
 ; NO_ASSUME-LABEL: @f_0(
-; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @f_0(
-; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT: ret void
@@ -34,15 +34,15 @@
 define void @f_1(i32* %ptr) {
 ; We can forward invariant loads to non-invariant loads.
 ; NO_ASSUME-LABEL: @f_1(
-; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @f_1(
-; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT: ret void
 ;
@@ -57,15 +57,15 @@
 define void @f_2(i32* %ptr) {
 ; We can forward a non-invariant load into an invariant load.
 ; NO_ASSUME-LABEL: @f_2(
-; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @f_2(
-; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT: ret void
 ;
@@ -79,7 +79,7 @@
 define void @f_3(i1 %cond, i32* %ptr) {
 ; NO_ASSUME-LABEL: @f_3(
-; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
 ; NO_ASSUME: left:
@@ -89,11 +89,11 @@
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @f_3(
-; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
 ; USE_ASSUME: left:
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT: ret void
 ; USE_ASSUME: right:
@@ -119,11 +119,11 @@
 ; CHECK-LABEL: @f_4(
 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[MERGE:%.*]]
 ; CHECK: left:
-; CHECK-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL0]])
 ; CHECK-NEXT: br label [[MERGE]]
 ; CHECK: merge:
-; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[PTR]], align 4
+; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr [[PTR]], align 4
 ; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL1]])
 ; CHECK-NEXT: ret void
 ;
@@ -148,14 +148,14 @@
 ; to restore the same unchanging value.
 define void @test_dse1(i32* %p) {
 ; NO_ASSUME-LABEL: @test_dse1(
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @test_dse1(
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret void
 ;
 %v1 = load i32, i32* %p, !invariant.load !{}
@@ -167,9 +167,9 @@
 ; By assumption, v1 must equal v2 (TODO)
 define void @test_false_negative_dse2(i32* %p, i32 %v2) {
 ; CHECK-LABEL: @test_false_negative_dse2(
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; CHECK-NEXT: call void @clobber_and_use(i32 [[V1]])
-; CHECK-NEXT: store i32 [[V2:%.*]], i32* [[P]], align 4
+; CHECK-NEXT: store i32 [[V2:%.*]], ptr [[P]], align 4
 ; CHECK-NEXT: ret void
 ;
 %v1 = load i32, i32* %p, !invariant.load !{}
@@ -182,15 +182,15 @@
 ; it lets us remove later loads not explicitly marked invariant
 define void @test_scope_start_without_load(i32* %p) {
 ; NO_ASSUME-LABEL: @test_scope_start_without_load(
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @test_scope_start_without_load(
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
@@ -210,7 +210,7 @@
 ; load
 define void @test_scope_restart(i32* %p) {
 ; NO_ASSUME-LABEL: @test_scope_restart(
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
 ; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
@@ -218,9 +218,9 @@
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: @test_scope_restart(
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
Index: llvm/test/Transforms/EarlyCSE/invariant.start.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/invariant.start.ll
+++ llvm/test/Transforms/EarlyCSE/invariant.start.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
-; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
-; RUN: opt < %s -S -early-cse --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME
-; RUN: opt < %s -S -passes=early-cse | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
+; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
+; RUN: opt < %s -S -early-cse --enable-knowledge-retention -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,USE_ASSUME
+; RUN: opt < %s -S -passes=early-cse -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,NO_ASSUME

 declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
 declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
@@ -10,16 +10,16 @@
 ; clobber memory
 define i8 @test_bypass1(i8 *%P) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
-; NO_ASSUME-SAME: (i8* [[P:%.*]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]], align 1
-; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, ptr [[P]], align 1
+; NO_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
 ; NO_ASSUME-NEXT: ret i8 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
-; USE_ASSUME-SAME: (i8* [[P:%.*]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]], align 1
-; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, ptr [[P]], align 1
+; USE_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
 ; USE_ASSUME-NEXT: ret i8 0
 ;
@@ -34,16 +34,16 @@
 ; Trivial Store->load forwarding over invariant.start
 define i8 @test_bypass2(i8 *%P) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
-; NO_ASSUME-SAME: (i8* [[P:%.*]])
-; NO_ASSUME-NEXT: store i8 42, i8* [[P]], align 1
-; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: store i8 42, ptr [[P]], align 1
+; NO_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
 ; NO_ASSUME-NEXT: ret i8 42
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
-; USE_ASSUME-SAME: (i8* [[P:%.*]])
-; USE_ASSUME-NEXT: store i8 42, i8* [[P]], align 1
-; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: store i8 42, ptr [[P]], align 1
+; USE_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
 ; USE_ASSUME-NEXT: ret i8 42
 ;
@@ -58,16 +58,16 @@
 ; of invariant.start.
 define void @test_bypass3(i8* %P) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
-; NO_ASSUME-SAME: (i8* [[P:%.*]])
-; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; NO_ASSUME-NEXT: store i8 60, i8* [[P]], align 1
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; NO_ASSUME-NEXT: store i8 60, ptr [[P]], align 1
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
-; USE_ASSUME-SAME: (i8* [[P:%.*]])
-; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
-; USE_ASSUME-NEXT: store i8 60, i8* [[P]], align 1
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
+; USE_ASSUME-NEXT: store i8 60, ptr [[P]], align 1
 ; USE_ASSUME-NEXT: ret void
 ;
@@ -82,11 +82,11 @@
 ; the invariant region, between start and end.
 define void @test_bypass4(i8* %P) {
 ; CHECK-LABEL: define {{[^@]+}}@test_bypass4
-; CHECK-SAME: (i8* [[P:%.*]])
-; CHECK-NEXT: store i8 50, i8* [[P]], align 1
-; CHECK-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* [[I]], i64 1, i8* [[P]])
-; CHECK-NEXT: store i8 60, i8* [[P]], align 1
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: store i8 50, ptr [[P]], align 1
+; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[I]], i64 1, ptr [[P]])
+; CHECK-NEXT: store i8 60, ptr [[P]], align 1
 ; CHECK-NEXT: ret void
 ;
@@ -105,18 +105,18 @@
 define i32 @test_before_load(i32* %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT: call void @clobber()
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@@ -129,18 +129,18 @@
 define i32 @test_before_clobber(i32* %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: call void @clobber()
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 %v1 = load i32, i32* %p
@@ -153,20 +153,20 @@
 define i32 @test_duplicate_scope(i32* %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: call void @clobber()
-; NO_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-NEXT: [[TMP2:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: [[TMP2:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 %v1 = load i32, i32* %p
@@ -180,20 +180,20 @@
 define i32 @test_unanalzyable_load(i32* %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: call void @clobber()
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT: call void @clobber()
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@@ -207,11 +207,11 @@
 define i32 @test_negative_after_clobber(i32* %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_after_clobber
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -225,9 +225,9 @@
 define i32 @test_merge(i32* %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; NO_ASSUME: taken:
 ; NO_ASSUME-NEXT: call void @clobber()
@@ -236,15 +236,15 @@
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; USE_ASSUME: taken:
 ; USE_ASSUME-NEXT: call void @clobber()
 ; USE_ASSUME-NEXT: br label [[MERGE]]
 ; USE_ASSUME: merge:
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 %v1 = load i32, i32* %p
@@ -262,15 +262,15 @@
 define i32 @test_negative_after_mergeclobber(i32* %p, i1 %cnd) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_after_mergeclobber
-; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; CHECK: taken:
 ; CHECK-NEXT: call void @clobber()
 ; CHECK-NEXT: br label [[MERGE]]
 ; CHECK: merge:
-; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -291,15 +291,15 @@
 ; merging facts along distinct paths.
 define i32 @test_false_negative_merge(i32* %p, i1 %cnd) {
 ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_merge
-; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; CHECK: taken:
-; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; CHECK-NEXT: call void @clobber()
 ; CHECK-NEXT: br label [[MERGE]]
 ; CHECK: merge:
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -318,10 +318,10 @@
 define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: call void @clobber()
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; NO_ASSUME: taken:
 ; NO_ASSUME-NEXT: call void @clobber()
@@ -330,16 +330,16 @@
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; USE_ASSUME: taken:
 ; USE_ASSUME-NEXT: call void @clobber()
 ; USE_ASSUME-NEXT: br label [[MERGE]]
 ; USE_ASSUME: merge:
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@@ -358,18 +358,18 @@
 define void @test_dse_before_load(i32* %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT: call void @clobber()
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret void
 ;
 call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@@ -381,18 +381,18 @@
 define void @test_dse_after_load(i32* %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT: call void @clobber()
 ; NO_ASSUME-NEXT: ret void
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) {
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret void
 ;
 %v1 = load i32, i32* %p
@@ -408,12 +408,11 @@
 ; passes will canonicalize away the bitcasts in this example.
 define i32 @test_false_negative_types(i32* %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_types
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: [[PF:%.*]] = bitcast i32* [[P]] to float*
-; CHECK-NEXT: [[V2F:%.*]] = load float, float* [[PF]], align 4
+; CHECK-NEXT: [[V2F:%.*]] = load float, ptr [[P]], align 4
 ; CHECK-NEXT: [[V2:%.*]] = bitcast float [[V2F]] to i32
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
@@ -430,11 +429,11 @@
 define i32 @test_negative_size1(i32* %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_size1
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 3, i32* [[P]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 3, ptr [[P]])
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -448,11 +447,11 @@
 define i32 @test_negative_size2(i32* %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_size2
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 0, i32* [[P]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 0, ptr [[P]])
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -466,12 +465,12 @@
 define i32 @test_negative_scope(i32* %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_scope
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: [[SCOPE:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[SCOPE]], i64 4, ptr [[P]])
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -486,12 +485,12 @@
 define i32 @test_false_negative_scope(i32* %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_scope
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: [[SCOPE:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT: call void @clobber()
-; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
+; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[SCOPE]], i64 4, ptr [[P]])
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
@@ -507,16 +506,16 @@
 ; Invariant load defact starts an invariant.start scope of the appropriate size
 define i32 @test_invariant_load_scope(i32* %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !0
+; NO_ASSUME-SAME: (ptr [[P:%.*]]) {
+; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT: call void @clobber()
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !0
+; USE_ASSUME-SAME: (ptr [[P:%.*]]) {
+; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT: call void @clobber()
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
 %v1 = load i32, i32* %p, !invariant.load !{}
Index: llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
+++ llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -early-cse < %s | FileCheck %s
+; RUN: opt -S -early-cse -normalize-opaque-pointers < %s | FileCheck %s

 ; Unequal mask check.

@@ -12,7 +12,7 @@
 ; Expect the second load to be removed.
 define <4 x i32> @f3(<4 x i32>* %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f3(
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT: [[V2:%.*]] = add <4 x i32> [[V0]], [[V0]]
 ; CHECK-NEXT: ret <4 x i32> [[V2]]
 ;
@@ -26,8 +26,8 @@
 ; Expect the second load to remain.
 define <4 x i32> @f4(<4 x i32>* %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f4(
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
-; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer)
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer)
 ; CHECK-NEXT: [[V2:%.*]] = add <4 x i32> [[V0]], [[V1]]
 ; CHECK-NEXT: ret <4 x i32> [[V2]]
 ;
@@ -41,8 +41,8 @@
 ; Expect the second load to remain.
 define <4 x i32> @f5(<4 x i32>* %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f5(
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
-; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer)
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer)
 ; CHECK-NEXT: [[V2:%.*]] = add <4 x i32> [[V0]], [[V1]]
 ; CHECK-NEXT: ret <4 x i32> [[V2]]
 ;
@@ -59,7 +59,7 @@
 ; Expect the first store to be removed.
 define void @f6(<4 x i32> %a0, <4 x i32>* %a1) {
 ; CHECK-LABEL: @f6(
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> )
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> )
 ; CHECK-NEXT: ret void
 ;
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> )
@@ -71,8 +71,8 @@
 ; Expect both stores to remain.
 define void @f7(<4 x i32> %a0, <4 x i32>* %a1) {
 ; CHECK-LABEL: @f7(
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> )
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0]], <4 x i32>* [[A1]], i32 4, <4 x i1> )
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> )
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0]], ptr [[A1]], i32 4, <4 x i1> )
 ; CHECK-NEXT: ret void
 ;
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> )
@@ -87,7 +87,7 @@
 ; Expect the store to be removed.
 define <4 x i32> @f8(<4 x i32>* %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f8(
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT: ret <4 x i32> [[V0]]
 ;
 %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> , <4 x i32> %a1)
@@ -99,8 +99,8 @@
 ; Expect the store to remain.
 define <4 x i32> @f9(<4 x i32>* %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f9(
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V0]], <4 x i32>* [[A0]], i32 4, <4 x i1> )
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]])
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V0]], ptr [[A0]], i32 4, <4 x i1> )
 ; CHECK-NEXT: ret <4 x i32> [[V0]]
 ;
 %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> , <4 x i32> %a1)
@@ -115,7 +115,7 @@
 ; Expect the load to be removed.
 define <4 x i32> @fa(<4 x i32> %a0, <4 x i32>* %a1) {
 ; CHECK-LABEL: @fa(
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> )
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> )
 ; CHECK-NEXT: ret <4 x i32> [[A0]]
 ;
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> )
@@ -127,8 +127,8 @@
 ; Expect the load to remain.
 define <4 x i32> @fb(<4 x i32> %a0, <4 x i32>* %a1) {
 ; CHECK-LABEL: @fb(
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> )
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A1]], i32 4, <4 x i1> , <4 x i32> zeroinitializer)
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> )
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A1]], i32 4, <4 x i1> , <4 x i32> zeroinitializer)
 ; CHECK-NEXT: ret <4 x i32> [[V0]]
 ;
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> )
@@ -140,8 +140,8 @@
 ; Expect the load to remain.
 define <4 x i32> @fc(<4 x i32> %a0, <4 x i32>* %a1) {
 ; CHECK-LABEL: @fc(
-; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> )
-; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A1]], i32 4, <4 x i1> , <4 x i32> undef)
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> )
+; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A1]], i32 4, <4 x i1> , <4 x i32> undef)
 ; CHECK-NEXT: ret <4 x i32> [[V0]]
 ;
 call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> )
Index: llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
+++ llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -early-cse < %s | FileCheck %s
+; RUN: opt -S -early-cse -normalize-opaque-pointers < %s | FileCheck %s

 define <128 x i8> @f0(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
 ; CHECK-LABEL: @f0(
 ; CHECK-NEXT: [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
-; CHECK-NEXT: call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[A1]], <128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]])
+; CHECK-NEXT: call void @llvm.masked.store.v128i8.p0(<128 x i8> [[A1]], ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]])
 ; CHECK-NEXT: ret <128 x i8> [[A1]]
 ;
 %v0 = icmp eq <128 x i8> %a1, %a2
@@ -16,7 +16,7 @@
 define <128 x i8> @f1(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
 ; CHECK-LABEL: @f1(
 ; CHECK-NEXT: [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
+; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
 ; CHECK-NEXT: ret <128 x i8> [[V1]]
 ;
 %v0 = icmp eq <128 x i8> %a1, %a2
@@ -28,7 +28,7 @@
 define <128 x i8> @f2(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
 ; CHECK-LABEL: @f2(
 ; CHECK-NEXT: [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
+; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
 ; CHECK-NEXT: [[V3:%.*]] = add <128 x i8> [[V1]], [[V1]]
 ; CHECK-NEXT: ret <128 x i8> [[V3]]
 ;
Index: llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
+++ llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S < %s -early-cse -earlycse-debug-hash | FileCheck %s
+; RUN: opt -S < %s -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s

 ; Store-to-load forwarding across a @llvm.experimental.noalias.scope.decl.

 define float @s2l(float* %p) {
 ; CHECK-LABEL: @s2l(
-; CHECK-NEXT: store float 0.000000e+00, float* [[P:%.*]], align 4
-; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
+; CHECK-NEXT: store float 0.000000e+00, ptr [[P:%.*]], align 4
+; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
 ; CHECK-NEXT: ret float 0.000000e+00
 ;
 store float 0.0, float* %p
@@ -19,8 +19,8 @@

 define float @rle(float* %p) {
 ; CHECK-LABEL: @rle(
-; CHECK-NEXT: [[R:%.*]] = load float, float* [[P:%.*]], align 4
-; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0)
+; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 4
+; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0]])
 ; CHECK-NEXT: [[T:%.*]] = fadd float [[R]], [[R]]
 ; CHECK-NEXT: ret float [[T]]
 ;
Index: llvm/test/Transforms/EarlyCSE/phi.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/phi.ll
+++ llvm/test/Transforms/EarlyCSE/phi.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -early-cse -earlycse-debug-hash -S < %s | FileCheck %s
-; RUN: opt -basic-aa -early-cse-memssa -S < %s | FileCheck %s
+; RUN: opt -early-cse -earlycse-debug-hash -normalize-opaque-pointers -S < %s | FileCheck %s
+; RUN: opt -basic-aa -early-cse-memssa -normalize-opaque-pointers -S < %s | FileCheck %s

 ; Most basic case, fully identical PHI nodes
 define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
@@ -14,8 +14,8 @@
 ; CHECK: end:
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -47,8 +47,8 @@
 ; CHECK: end:
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -80,8 +80,8 @@
 ; CHECK: end:
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V2:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -111,8 +111,8 @@
 ; CHECK: end:
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V2:%.*]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -142,8 +142,8 @@
 ; CHECK: end:
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -174,7 +174,7 @@
 ; CHECK-NEXT: br label [[END]]
 ; CHECK: end:
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -203,7 +203,7 @@
 ; CHECK-NEXT: br label [[END]]
 ; CHECK: end:
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -235,9 +235,9 @@
 ; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -270,9 +270,9 @@
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -305,9 +305,9 @@
 ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
 ; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
-; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT: ret void
 ;
 entry:
Index: llvm/tools/opt/NewPMDriver.cpp
===================================================================
--- llvm/tools/opt/NewPMDriver.cpp
+++ llvm/tools/opt/NewPMDriver.cpp
@@ -19,6 +19,8 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/CGSCCPassManager.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
 #include "llvm/Bitcode/BitcodeWriterPass.h"
 #include "llvm/Config/llvm-config.h"
 #include "llvm/IR/Dominators.h"
@@ -150,8 +152,65 @@
 static cl::opt<bool> PseudoProbeForProfiling(
     "new-pm-pseudo-probe-for-profiling", cl::init(false), cl::Hidden,
     cl::desc("Emit pseudo probes to enable PGO profile generation."));
+static cl::opt<bool> NormalizeOpaquePointers(
+    "normalize-opaque-pointers", cl::Hidden,
+    cl::desc("Convert module to opaque pointers before printing"));
 /// @}}

+static std::unique_ptr<Module> cloneModuleIntoContext(
+    LLVMContext &NewCtx, const Module &M, bool ShouldPreserveUseListOrder) {
+  SmallVector<char, 0> Buffer;
+  raw_svector_ostream OS(Buffer);
+  WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder);
+  MemoryBufferRef MBuf(OS.str(), "temporary bitcode");
+  Expected<std::unique_ptr<Module>> Module = parseBitcodeFile(MBuf, NewCtx);
+  if (!Module)
+    handleAllErrors(Module.takeError());
+  return std::move(*Module);
+}
+
+static void normalizeModuleForOpaquePointers(Module &M) {
+  for (Function &F : M.functions()) {
+    for (BasicBlock &BB : F) {
+      for (Instruction &I : make_early_inc_range(BB)) {
+        // Drop no-op bitcasts from ptr to ptr, which will usually not be
+        // present with opaque pointers.
+        if (auto *BC = dyn_cast<BitCastInst>(&I)) {
+          if (BC->getType() == BC->getOperand(0)->getType() &&
+              BC->getType()->isPointerTy()) {
+            BC->replaceAllUsesWith(BC->getOperand(0));
+            BC->eraseFromParent();
+          }
+        }
+      }
+    }
+  }
+}
+
+class CustomPrintModulePass : public PassInfoMixin<CustomPrintModulePass> {
+  raw_ostream &OS;
+  bool ShouldPreserveUseListOrder;
+
+public:
+  CustomPrintModulePass(raw_ostream &OS, bool ShouldPreserveUseListOrder)
+      : OS(OS), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {}
+
+  PreservedAnalyses run(Module &M, AnalysisManager<Module> &) {
+    if (NormalizeOpaquePointers && M.getContext().supportsTypedPointers()) {
+      LLVMContext OpaqueCtx;
+      OpaqueCtx.enableOpaquePointers();
+      std::unique_ptr<Module> OpaqueM =
+          cloneModuleIntoContext(OpaqueCtx, M, ShouldPreserveUseListOrder);
+      normalizeModuleForOpaquePointers(*OpaqueM);
+      OpaqueM->print(OS, nullptr, ShouldPreserveUseListOrder);
+    } else {
+      M.print(OS, nullptr, ShouldPreserveUseListOrder);
+    }
+    return PreservedAnalyses::all();
+  }
+  static bool isRequired() { return true; }
+};
+
 template <typename PassManagerT>
 bool tryParsePipelineText(PassBuilder &PB,
                           const cl::opt<std::string> &PipelineOpt) {
@@ -457,7 +516,7 @@
     break; // No output pass needed.
   case OK_OutputAssembly:
     MPM.addPass(
-        PrintModulePass(Out->os(), "", ShouldPreserveAssemblyUseListOrder));
+        CustomPrintModulePass(Out->os(), ShouldPreserveAssemblyUseListOrder));
    break;
   case OK_OutputBitcode:
    MPM.addPass(BitcodeWriterPass(Out->os(), ShouldPreserveBitcodeUseListOrder,
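
Reviewer note (illustration only, not part of the patch): cloneModuleIntoContext goes through a bitcode round-trip because an existing Module cannot switch LLVMContext in place; writing bitcode and re-parsing it into a context with opaque pointers enabled is what re-types every pointer. The standalone sketch below exercises just the normalization step on a module parsed from a string. It assumes a typed-pointer build of LLVM contemporary with this patch, and the helper name stripNoOpPtrBitcasts is ours, not something defined in this change.

#include "llvm/ADT/STLExtras.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Same cleanup as normalizeModuleForOpaquePointers above: erase bitcasts
// whose source and destination are the identical pointer type.
static void stripNoOpPtrBitcasts(Module &M) {
  for (Function &F : M.functions())
    for (BasicBlock &BB : F)
      for (Instruction &I : make_early_inc_range(BB))
        if (auto *BC = dyn_cast<BitCastInst>(&I))
          if (BC->getType()->isPointerTy() &&
              BC->getType() == BC->getOperand(0)->getType()) {
            BC->replaceAllUsesWith(BC->getOperand(0));
            BC->eraseFromParent();
          }
}

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // A no-op ptr-to-ptr bitcast of the kind the cleanup deletes.
  std::unique_ptr<Module> M = parseAssemblyString(
      "define i8* @f(i8* %p) {\n"
      "  %q = bitcast i8* %p to i8*\n"
      "  ret i8* %q\n"
      "}\n",
      Err, Ctx);
  if (!M) {
    Err.print("sketch", errs());
    return 1;
  }
  stripNoOpPtrBitcasts(*M);
  M->print(outs(), nullptr); // %q is gone; @f now returns %p directly.
  return 0;
}

The same shape of cleanup is what keeps the -normalize-opaque-pointers output byte-identical between the typed-pointer and opaque-pointer RUN lines in the tests above.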