Index: llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll +++ llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll @@ -1,13 +1,13 @@ -; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -early-cse -earlycse-debug-hash | FileCheck %s -; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -basic-aa -early-cse-memssa | FileCheck %s -; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -passes=early-cse | FileCheck %s -; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -aa-pipeline=basic-aa -passes='early-cse' | FileCheck %s +; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -passes=early-cse -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -aa-pipeline=basic-aa -passes='early-cse' -normalize-opaque-pointers | FileCheck %s define <4 x i32> @test_cse(i32* %a, [2 x <4 x i32>] %s.coerce, i32 %n) { entry: ; Check that @llvm.aarch64.neon.ld2 is optimized away by Early CSE. ; CHECK-LABEL: @test_cse -; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8 +; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0 %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0 %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1 br label %for.cond @@ -41,8 +41,8 @@ entry: ; Check that the first @llvm.aarch64.neon.st2 is optimized away by Early CSE. 
; CHECK-LABEL: @test_cse2 -; CHECK-NOT: call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %3, i8* %0) -; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %s.coerce.fca.0.extract, <4 x i32> %s.coerce.fca.1.extract, i8* %0) +; CHECK-NOT: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %3, ptr %0) +; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %s.coerce.fca.0.extract, <4 x i32> %s.coerce.fca.1.extract, ptr {{.*}}) %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0 %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1 br label %for.cond @@ -77,8 +77,8 @@ entry: ; Check that the first @llvm.aarch64.neon.ld2 is optimized away by Early CSE. ; CHECK-LABEL: @test_cse3 -; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8 -; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8 +; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0 +; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0 %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0 %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1 br label %for.cond @@ -112,7 +112,7 @@ ; Check that the store prevents @llvm.aarch64.neon.ld2 from being optimized ; away by Early CSE. ; CHECK-LABEL: @test_nocse -; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8 +; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0 %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0 %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1 br label %for.cond @@ -148,7 +148,7 @@ ; Check that @llvm.aarch64.neon.ld3 is not optimized away by Early CSE due ; to mismatch between st2 and ld3. 
; CHECK-LABEL: @test_nocse2 -; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8 +; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0 %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0 %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1 br label %for.cond @@ -183,8 +183,8 @@ ; Check that @llvm.aarch64.neon.st3 is not optimized away by Early CSE due to ; mismatch between st2 and st3. ; CHECK-LABEL: @test_nocse3 -; CHECK: call void @llvm.aarch64.neon.st3.v4i32.p0i8 -; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0i8 +; CHECK: call void @llvm.aarch64.neon.st3.v4i32.p0 +; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0 %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0 %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1 br label %for.cond Index: llvm/test/Transforms/EarlyCSE/atomics.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/atomics.ll +++ llvm/test/Transforms/EarlyCSE/atomics.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s -; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s +; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s define i32 @test12(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test12( -; CHECK-NEXT: [[LOAD0:%.*]] = load i32, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, i32* [[P2:%.*]] seq_cst, align 4 -; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[P1]], align 4 +; CHECK-NEXT: [[LOAD0:%.*]] = load i32, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr [[P2:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[P1]], align 4 ; CHECK-NEXT: [[SEL:%.*]] 
= select i1 [[B:%.*]], i32 [[LOAD0]], i32 [[LOAD1]] ; CHECK-NEXT: ret i32 [[SEL]] ; @@ -20,7 +20,7 @@ ; atomic to non-atomic forwarding is legal define i32 @test13(i1 %B, i32* %P1) { ; CHECK-LABEL: @test13( -; CHECK-NEXT: [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[A:%.*]] = load atomic i32, ptr [[P1:%.*]] seq_cst, align 4 ; CHECK-NEXT: ret i32 0 ; %a = load atomic i32, i32* %P1 seq_cst, align 4 @@ -32,7 +32,7 @@ ; atomic to unordered atomic forwarding is legal define i32 @test14(i1 %B, i32* %P1) { ; CHECK-LABEL: @test14( -; CHECK-NEXT: [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[A:%.*]] = load atomic i32, ptr [[P1:%.*]] seq_cst, align 4 ; CHECK-NEXT: ret i32 0 ; %a = load atomic i32, i32* %P1 seq_cst, align 4 @@ -45,8 +45,8 @@ ; than unordered define i32 @test15(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test15( -; CHECK-NEXT: [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4 -; CHECK-NEXT: [[B:%.*]] = load atomic i32, i32* [[P1]] seq_cst, align 4 +; CHECK-NEXT: [[A:%.*]] = load atomic i32, ptr [[P1:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[B:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4 ; CHECK-NEXT: [[RES:%.*]] = sub i32 [[A]], [[B]] ; CHECK-NEXT: ret i32 [[RES]] ; @@ -62,8 +62,8 @@ ; do that right now.) 
define i32 @test16(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test16( -; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: [[B:%.*]] = load atomic i32, i32* [[P1]] unordered, align 4 +; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: [[B:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4 ; CHECK-NEXT: [[RES:%.*]] = sub i32 [[A]], [[B]] ; CHECK-NEXT: ret i32 [[RES]] ; @@ -76,9 +76,9 @@ ; Can't DSE across a full fence define void @fence_seq_cst_store(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @fence_seq_cst_store( -; CHECK-NEXT: store i32 0, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: store atomic i32 0, i32* [[P2:%.*]] seq_cst, align 4 -; CHECK-NEXT: store i32 0, i32* [[P1]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: store atomic i32 0, ptr [[P2:%.*]] seq_cst, align 4 +; CHECK-NEXT: store i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store i32 0, i32* %P1, align 4 @@ -90,9 +90,9 @@ ; Can't DSE across a full fence define void @fence_seq_cst(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @fence_seq_cst( -; CHECK-NEXT: store i32 0, i32* [[P1:%.*]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 4 ; CHECK-NEXT: fence seq_cst -; CHECK-NEXT: store i32 0, i32* [[P1]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store i32 0, i32* %P1, align 4 @@ -104,9 +104,9 @@ ; Can't DSE across a full fence define void @fence_asm_sideeffect(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @fence_asm_sideeffect( -; CHECK-NEXT: store i32 0, i32* [[P1:%.*]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 4 ; CHECK-NEXT: call void asm sideeffect "", ""() -; CHECK-NEXT: store i32 0, i32* [[P1]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store i32 0, i32* %P1, align 4 @@ -118,9 +118,9 @@ ; Can't DSE across a full fence define void @fence_asm_memory(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @fence_asm_memory( -; 
CHECK-NEXT: store i32 0, i32* [[P1:%.*]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 4 ; CHECK-NEXT: call void asm "", "~{memory}"() -; CHECK-NEXT: store i32 0, i32* [[P1]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store i32 0, i32* %P1, align 4 @@ -132,8 +132,8 @@ ; Can't remove a volatile load define i32 @volatile_load(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @volatile_load( -; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: [[B:%.*]] = load volatile i32, i32* [[P1]], align 4 +; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: [[B:%.*]] = load volatile i32, ptr [[P1]], align 4 ; CHECK-NEXT: [[RES:%.*]] = sub i32 [[A]], [[B]] ; CHECK-NEXT: ret i32 [[RES]] ; @@ -146,8 +146,8 @@ ; Can't remove redundant volatile loads define i32 @redundant_volatile_load(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @redundant_volatile_load( -; CHECK-NEXT: [[A:%.*]] = load volatile i32, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: [[B:%.*]] = load volatile i32, i32* [[P1]], align 4 +; CHECK-NEXT: [[A:%.*]] = load volatile i32, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: [[B:%.*]] = load volatile i32, ptr [[P1]], align 4 ; CHECK-NEXT: [[RES:%.*]] = sub i32 [[A]], [[B]] ; CHECK-NEXT: ret i32 [[RES]] ; @@ -160,8 +160,8 @@ ; Can't DSE a volatile store define void @volatile_store(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @volatile_store( -; CHECK-NEXT: store volatile i32 0, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: store i32 3, i32* [[P1]], align 4 +; CHECK-NEXT: store volatile i32 0, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: store i32 3, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store volatile i32 0, i32* %P1, align 4 @@ -172,8 +172,8 @@ ; Can't DSE a redundant volatile store define void @redundant_volatile_store(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @redundant_volatile_store( -; CHECK-NEXT: store volatile i32 0, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: store volatile i32 0, i32* [[P1]], 
align 4 +; CHECK-NEXT: store volatile i32 0, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: store volatile i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store volatile i32 0, i32* %P1, align 4 @@ -184,7 +184,7 @@ ; Can value forward from volatiles define i32 @test20(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test20( -; CHECK-NEXT: [[A:%.*]] = load volatile i32, i32* [[P1:%.*]], align 4 +; CHECK-NEXT: [[A:%.*]] = load volatile i32, ptr [[P1:%.*]], align 4 ; CHECK-NEXT: ret i32 0 ; %a = load volatile i32, i32* %P1, align 4 @@ -197,8 +197,8 @@ ; currently a missed optimization define void @test21(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test21( -; CHECK-NEXT: store i32 0, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: store volatile i32 3, i32* [[P1]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: store volatile i32 3, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store i32 0, i32* %P1, align 4 @@ -209,7 +209,7 @@ ; Can DSE a normal store in favor of a unordered one define void @test22(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test22( -; CHECK-NEXT: store atomic i32 3, i32* [[P1:%.*]] unordered, align 4 +; CHECK-NEXT: store atomic i32 3, ptr [[P1:%.*]] unordered, align 4 ; CHECK-NEXT: ret void ; store i32 0, i32* %P1, align 4 @@ -220,7 +220,7 @@ ; Can also DSE a unordered store in favor of a normal one define void @test23(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test23( -; CHECK-NEXT: store i32 0, i32* [[P1:%.*]], align 4 +; CHECK-NEXT: store i32 0, ptr [[P1:%.*]], align 4 ; CHECK-NEXT: ret void ; store atomic i32 3, i32* %P1 unordered, align 4 @@ -233,8 +233,8 @@ ; represent the required ordering. 
define void @test24(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test24( -; CHECK-NEXT: store atomic i32 3, i32* [[P1:%.*]] release, align 4 -; CHECK-NEXT: store i32 0, i32* [[P1]], align 4 +; CHECK-NEXT: store atomic i32 3, ptr [[P1:%.*]] release, align 4 +; CHECK-NEXT: store i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store atomic i32 3, i32* %P1 release, align 4 @@ -246,8 +246,8 @@ ; the count of such stores is an observable program side effect. define void @test25(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test25( -; CHECK-NEXT: store volatile i32 3, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: store volatile i32 0, i32* [[P1]], align 4 +; CHECK-NEXT: store volatile i32 3, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: store volatile i32 0, ptr [[P1]], align 4 ; CHECK-NEXT: ret void ; store volatile i32 3, i32* %P1, align 4 @@ -258,7 +258,7 @@ ; Can DSE a unordered store in favor of a unordered one define void @test26(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test26( -; CHECK-NEXT: store atomic i32 3, i32* [[P1:%.*]] unordered, align 4 +; CHECK-NEXT: store atomic i32 3, ptr [[P1:%.*]] unordered, align 4 ; CHECK-NEXT: ret void ; store atomic i32 0, i32* %P1 unordered, align 4 @@ -270,8 +270,8 @@ ; but current don't due to implementation limits define void @test27(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test27( -; CHECK-NEXT: store atomic i32 0, i32* [[P1:%.*]] unordered, align 4 -; CHECK-NEXT: store atomic i32 3, i32* [[P1]] release, align 4 +; CHECK-NEXT: store atomic i32 0, ptr [[P1:%.*]] unordered, align 4 +; CHECK-NEXT: store atomic i32 3, ptr [[P1]] release, align 4 ; CHECK-NEXT: ret void ; store atomic i32 0, i32* %P1 unordered, align 4 @@ -283,8 +283,8 @@ ; ordered one, but current don't due to implementation limits define void @test28(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test28( -; CHECK-NEXT: store atomic i32 0, i32* [[P1:%.*]] unordered, align 4 -; CHECK-NEXT: store atomic i32 3, i32* [[P1]] release, align 4 +; CHECK-NEXT: store atomic i32 
0, ptr [[P1:%.*]] unordered, align 4 +; CHECK-NEXT: store atomic i32 3, ptr [[P1]] release, align 4 ; CHECK-NEXT: ret void ; store atomic i32 0, i32* %P1 unordered, align 4 @@ -296,8 +296,8 @@ ; see also: @test24 define void @test29(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test29( -; CHECK-NEXT: store atomic i32 3, i32* [[P1:%.*]] release, align 4 -; CHECK-NEXT: store atomic i32 0, i32* [[P1]] unordered, align 4 +; CHECK-NEXT: store atomic i32 3, ptr [[P1:%.*]] release, align 4 +; CHECK-NEXT: store atomic i32 0, ptr [[P1]] unordered, align 4 ; CHECK-NEXT: ret void ; store atomic i32 3, i32* %P1 release, align 4 Index: llvm/test/Transforms/EarlyCSE/basic.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/basic.ll +++ llvm/test/Transforms/EarlyCSE/basic.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s -; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s -; RUN: opt < %s -S -passes=early-cse | FileCheck %s +; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -passes=early-cse -normalize-opaque-pointers | FileCheck %s declare void @llvm.assume(i1) nounwind define void @test1(i8 %V, i32 *%P) { ; CHECK-LABEL: @test1( -; CHECK-NEXT: store i32 23, i32* [[P:%.*]], align 4 +; CHECK-NEXT: store i32 23, ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[C:%.*]] = zext i8 [[V:%.*]] to i32 -; CHECK-NEXT: store volatile i32 [[C]], i32* [[P]], align 4 -; CHECK-NEXT: store volatile i32 [[C]], i32* [[P]], align 4 +; CHECK-NEXT: store volatile i32 [[C]], ptr [[P]], align 4 +; CHECK-NEXT: store volatile i32 [[C]], ptr [[P]], align 4 ; CHECK-NEXT: [[E:%.*]] = add i32 [[C]], [[C]] -; CHECK-NEXT: store volatile i32 [[E]], i32* [[P]], align 4 -; CHECK-NEXT: store volatile 
i32 [[E]], i32* [[P]], align 4 -; CHECK-NEXT: store volatile i32 [[E]], i32* [[P]], align 4 +; CHECK-NEXT: store volatile i32 [[E]], ptr [[P]], align 4 +; CHECK-NEXT: store volatile i32 [[E]], ptr [[P]], align 4 +; CHECK-NEXT: store volatile i32 [[E]], ptr [[P]], align 4 ; CHECK-NEXT: ret void ; %A = bitcast i64 42 to double ;; dead @@ -40,7 +40,7 @@ ;; Simple load value numbering. define i32 @test2(i32 *%P) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: ret i32 0 ; %V1 = load i32, i32* %P @@ -51,7 +51,7 @@ define i32 @test2a(i32 *%P, i1 %b) { ; CHECK-LABEL: @test2a( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[B:%.*]]) ; CHECK-NEXT: ret i32 0 ; @@ -65,10 +65,10 @@ ;; Cross block load value numbering. define i32 @test3(i32 *%P, i1 %Cond) { ; CHECK-LABEL: @test3( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]] ; CHECK: T: -; CHECK-NEXT: store i32 4, i32* [[P]], align 4 +; CHECK-NEXT: store i32 4, ptr [[P]], align 4 ; CHECK-NEXT: ret i32 42 ; CHECK: F: ; CHECK-NEXT: ret i32 0 @@ -86,10 +86,10 @@ define i32 @test3a(i32 *%P, i1 %Cond, i1 %b) { ; CHECK-LABEL: @test3a( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]] ; CHECK: T: -; CHECK-NEXT: store i32 4, i32* [[P]], align 4 +; CHECK-NEXT: store i32 4, ptr [[P]], align 4 ; CHECK-NEXT: ret i32 42 ; CHECK: F: ; CHECK-NEXT: tail call void @llvm.assume(i1 [[B:%.*]]) @@ -110,12 +110,12 @@ ;; Cross block load value numbering stops when stores happen. 
define i32 @test4(i32 *%P, i1 %Cond) { ; CHECK-LABEL: @test4( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]] ; CHECK: T: ; CHECK-NEXT: ret i32 42 ; CHECK: F: -; CHECK-NEXT: store i32 42, i32* [[P]], align 4 +; CHECK-NEXT: store i32 42, ptr [[P]], align 4 ; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[V1]], 42 ; CHECK-NEXT: ret i32 [[DIFF]] ; @@ -137,7 +137,7 @@ ;; Simple call CSE'ing. define i32 @test5(i32 *%P) { ; CHECK-LABEL: @test5( -; CHECK-NEXT: [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) +; CHECK-NEXT: [[V1:%.*]] = call i32 @func(ptr [[P:%.*]]) ; CHECK-NEXT: ret i32 0 ; %V1 = call i32 @func(i32* %P) @@ -149,7 +149,7 @@ ;; Trivial Store->load forwarding define i32 @test6(i32 *%P) { ; CHECK-LABEL: @test6( -; CHECK-NEXT: store i32 42, i32* [[P:%.*]], align 4 +; CHECK-NEXT: store i32 42, ptr [[P:%.*]], align 4 ; CHECK-NEXT: ret i32 42 ; store i32 42, i32* %P @@ -159,7 +159,7 @@ define i32 @test6a(i32 *%P, i1 %b) { ; CHECK-LABEL: @test6a( -; CHECK-NEXT: store i32 42, i32* [[P:%.*]], align 4 +; CHECK-NEXT: store i32 42, ptr [[P:%.*]], align 4 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[B:%.*]]) ; CHECK-NEXT: ret i32 42 ; @@ -172,7 +172,7 @@ ;; Trivial dead store elimination. define void @test7(i32 *%P) { ; CHECK-LABEL: @test7( -; CHECK-NEXT: store i32 45, i32* [[P:%.*]], align 4 +; CHECK-NEXT: store i32 45, ptr [[P:%.*]], align 4 ; CHECK-NEXT: ret void ; store i32 42, i32* %P @@ -183,8 +183,8 @@ ;; Readnone functions aren't invalidated by stores. 
define i32 @test8(i32 *%P) { ; CHECK-LABEL: @test8( -; CHECK-NEXT: [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) #[[ATTR2:[0-9]+]] -; CHECK-NEXT: store i32 4, i32* [[P]], align 4 +; CHECK-NEXT: [[V1:%.*]] = call i32 @func(ptr [[P:%.*]]) #[[ATTR2:[0-9]+]] +; CHECK-NEXT: store i32 4, ptr [[P]], align 4 ; CHECK-NEXT: ret i32 0 ; %V1 = call i32 @func(i32* %P) readnone @@ -198,9 +198,9 @@ ;; can observe the earlier write. define i32 @test9(i32 *%P) { ; CHECK-LABEL: @test9( -; CHECK-NEXT: store i32 4, i32* [[P:%.*]], align 4 -; CHECK-NEXT: [[V1:%.*]] = call i32 @func(i32* [[P]]) #[[ATTR1:[0-9]+]] -; CHECK-NEXT: store i32 5, i32* [[P]], align 4 +; CHECK-NEXT: store i32 4, ptr [[P:%.*]], align 4 +; CHECK-NEXT: [[V1:%.*]] = call i32 @func(ptr [[P]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: store i32 5, ptr [[P]], align 4 ; CHECK-NEXT: ret i32 [[V1]] ; store i32 4, i32* %P @@ -212,8 +212,8 @@ ;; Trivial DSE can be performed across a readnone call. define i32 @test10(i32 *%P) { ; CHECK-LABEL: @test10( -; CHECK-NEXT: [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) #[[ATTR2]] -; CHECK-NEXT: store i32 5, i32* [[P]], align 4 +; CHECK-NEXT: [[V1:%.*]] = call i32 @func(ptr [[P:%.*]]) #[[ATTR2]] +; CHECK-NEXT: store i32 5, ptr [[P]], align 4 ; CHECK-NEXT: ret i32 [[V1]] ; store i32 4, i32* %P @@ -225,7 +225,7 @@ ;; Trivial dead store elimination - should work for an entire series of dead stores too. 
define void @test11(i32 *%P) { ; CHECK-LABEL: @test11( -; CHECK-NEXT: store i32 45, i32* [[P:%.*]], align 4 +; CHECK-NEXT: store i32 45, ptr [[P:%.*]], align 4 ; CHECK-NEXT: ret void ; store i32 42, i32* %P @@ -237,9 +237,9 @@ define i32 @test12(i1 %B, i32* %P1, i32* %P2) { ; CHECK-LABEL: @test12( -; CHECK-NEXT: [[LOAD0:%.*]] = load i32, i32* [[P1:%.*]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, i32* [[P2:%.*]] seq_cst, align 4 -; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32* [[P1]], align 4 +; CHECK-NEXT: [[LOAD0:%.*]] = load i32, ptr [[P1:%.*]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr [[P2:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[P1]], align 4 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[B:%.*]], i32 [[LOAD0]], i32 [[LOAD1]] ; CHECK-NEXT: ret i32 [[SEL]] ; @@ -252,7 +252,7 @@ define void @dse1(i32 *%P) { ; CHECK-LABEL: @dse1( -; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: ret void ; %v = load i32, i32* %P @@ -262,7 +262,7 @@ define void @dse2(i32 *%P) { ; CHECK-LABEL: @dse2( -; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4 ; CHECK-NEXT: ret void ; %v = load atomic i32, i32* %P seq_cst, align 4 @@ -272,7 +272,7 @@ define void @dse3(i32 *%P) { ; CHECK-LABEL: @dse3( -; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4 +; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4 ; CHECK-NEXT: ret void ; %v = load atomic i32, i32* %P seq_cst, align 4 @@ -282,8 +282,8 @@ define i32 @dse4(i32 *%P, i32 *%Q) { ; CHECK-LABEL: @dse4( -; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4 -; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4 +; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4 +; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, 
align 4 ; CHECK-NEXT: ret i32 0 ; %a = load i32, i32* %Q @@ -302,8 +302,8 @@ ; not have to respect the order in which those writes were done. define i32 @dse5(i32 *%P, i32 *%Q) { ; CHECK-LABEL: @dse5( -; CHECK-NEXT: [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4 -; CHECK-NEXT: [[A:%.*]] = load atomic i32, i32* [[Q:%.*]] unordered, align 4 +; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, align 4 +; CHECK-NEXT: [[A:%.*]] = load atomic i32, ptr [[Q:%.*]] unordered, align 4 ; CHECK-NEXT: ret i32 0 ; %v = load atomic i32, i32* %P unordered, align 4 @@ -317,7 +317,7 @@ define void @dse_neg1(i32 *%P) { ; CHECK-LABEL: @dse_neg1( -; CHECK-NEXT: store i32 5, i32* [[P:%.*]], align 4 +; CHECK-NEXT: store i32 5, ptr [[P:%.*]], align 4 ; CHECK-NEXT: ret void ; %v = load i32, i32* %P @@ -329,8 +329,8 @@ ; encoded. define void @dse_neg2(i32 *%P) { ; CHECK-LABEL: @dse_neg2( -; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P:%.*]], align 4 -; CHECK-NEXT: store atomic i32 [[V]], i32* [[P]] seq_cst, align 4 +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4 +; CHECK-NEXT: store atomic i32 [[V]], ptr [[P]] seq_cst, align 4 ; CHECK-NEXT: ret void ; %v = load i32, i32* %P @@ -343,9 +343,9 @@ define void @pr28763() { ; CHECK-LABEL: @pr28763( ; CHECK-NEXT: entry: -; CHECK-NEXT: store i32 0, i32* @c, align 4 +; CHECK-NEXT: store i32 0, ptr @c, align 4 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @reads_c(i32 0) -; CHECK-NEXT: store i32 2, i32* @c, align 4 +; CHECK-NEXT: store i32 2, ptr @c, align 4 ; CHECK-NEXT: ret void ; entry: Index: llvm/test/Transforms/EarlyCSE/commute.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/commute.ll +++ llvm/test/Transforms/EarlyCSE/commute.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s -; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s +; RUN: 
opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s define void @test1(float %A, float %B, float* %PA, float* %PB) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: [[C:%.*]] = fadd float [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: store float [[C]], float* [[PA:%.*]], align 4 -; CHECK-NEXT: store float [[C]], float* [[PB:%.*]], align 4 +; CHECK-NEXT: store float [[C]], ptr [[PA:%.*]], align 4 +; CHECK-NEXT: store float [[C]], ptr [[PB:%.*]], align 4 ; CHECK-NEXT: ret void ; %C = fadd float %A, %B @@ -19,8 +19,8 @@ define void @test2(float %A, float %B, i1* %PA, i1* %PB) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: [[C:%.*]] = fcmp oeq float [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1 -; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1 ; CHECK-NEXT: ret void ; %C = fcmp oeq float %A, %B @@ -33,8 +33,8 @@ define void @test3(float %A, float %B, i1* %PA, i1* %PB) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: [[C:%.*]] = fcmp uge float [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1 -; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1 ; CHECK-NEXT: ret void ; %C = fcmp uge float %A, %B @@ -47,8 +47,8 @@ define void @test4(i32 %A, i32 %B, i1* %PA, i1* %PB) { ; CHECK-LABEL: @test4( ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1 -; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1 ; CHECK-NEXT: ret void ; %C = icmp eq i32 %A, %B @@ -61,8 +61,8 @@ define void @test5(i32 %A, i32 %B, i1* %PA, i1* %PB) { ; CHECK-LABEL: @test5( ; 
CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: store i1 [[C]], i1* [[PA:%.*]], align 1 -; CHECK-NEXT: store i1 [[C]], i1* [[PB:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PA:%.*]], align 1 +; CHECK-NEXT: store i1 [[C]], ptr [[PB:%.*]], align 1 ; CHECK-NEXT: ret void ; %C = icmp sgt i32 %A, %B @@ -77,8 +77,8 @@ define void @test6(float %f, i1* %p1, i1* %p2) { ; CHECK-LABEL: @test6( ; CHECK-NEXT: [[C1:%.*]] = fcmp ult float [[F:%.*]], [[F]] -; CHECK-NEXT: store i1 [[C1]], i1* [[P1:%.*]], align 1 -; CHECK-NEXT: store i1 [[C1]], i1* [[P2:%.*]], align 1 +; CHECK-NEXT: store i1 [[C1]], ptr [[P1:%.*]], align 1 +; CHECK-NEXT: store i1 [[C1]], ptr [[P2:%.*]], align 1 ; CHECK-NEXT: ret void ; %c1 = fcmp ult float %f, %f @@ -748,14 +748,14 @@ ; negation of each negation to check for the same issue one level deeper. define void @not_not_min(i32* %px, i32* %py, i32* %pout) { ; CHECK-LABEL: @not_not_min( -; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[PX:%.*]], align 4 -; CHECK-NEXT: [[Y:%.*]] = load volatile i32, i32* [[PY:%.*]], align 4 +; CHECK-NEXT: [[X:%.*]] = load volatile i32, ptr [[PX:%.*]], align 4 +; CHECK-NEXT: [[Y:%.*]] = load volatile i32, ptr [[PY:%.*]], align 4 ; CHECK-NEXT: [[CMPA:%.*]] = icmp slt i32 [[X]], [[Y]] ; CHECK-NEXT: [[CMPB:%.*]] = xor i1 [[CMPA]], true ; CHECK-NEXT: [[RA:%.*]] = select i1 [[CMPA]], i32 [[X]], i32 [[Y]] -; CHECK-NEXT: store volatile i32 [[RA]], i32* [[POUT:%.*]], align 4 -; CHECK-NEXT: store volatile i32 [[RA]], i32* [[POUT]], align 4 -; CHECK-NEXT: store volatile i32 [[RA]], i32* [[POUT]], align 4 +; CHECK-NEXT: store volatile i32 [[RA]], ptr [[POUT:%.*]], align 4 +; CHECK-NEXT: store volatile i32 [[RA]], ptr [[POUT]], align 4 +; CHECK-NEXT: store volatile i32 [[RA]], ptr [[POUT]], align 4 ; CHECK-NEXT: ret void ; %x = load volatile i32, i32* %px Index: llvm/test/Transforms/EarlyCSE/const-speculation.ll =================================================================== --- 
llvm/test/Transforms/EarlyCSE/const-speculation.ll +++ llvm/test/Transforms/EarlyCSE/const-speculation.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -early-cse -earlycse-debug-hash -S %s | FileCheck %s +; RUN: opt -early-cse -earlycse-debug-hash -normalize-opaque-pointers -S %s | FileCheck %s %mystruct = type { i32 } @@ -22,8 +22,8 @@ ; CHECK: select: ; CHECK-NEXT: br label [[END]] ; CHECK: end: -; CHECK-NEXT: [[TMP:%.*]] = phi i32* [ null, [[ENTRY:%.*]] ], [ getelementptr inbounds ([[MYSTRUCT:%.*]], %mystruct* @var, i64 0, i32 0), [[SELECT]] ] -; CHECK-NEXT: [[RES:%.*]] = icmp eq i32* [[TMP]], null +; CHECK-NEXT: [[TMP:%.*]] = phi ptr [ null, [[ENTRY:%.*]] ], [ getelementptr inbounds ([[MYSTRUCT:%.*]], ptr @var, i64 0, i32 0), [[SELECT]] ] +; CHECK-NEXT: [[RES:%.*]] = icmp eq ptr [[TMP]], null ; CHECK-NEXT: ret i1 [[RES]] ; entry: Index: llvm/test/Transforms/EarlyCSE/flags.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/flags.ll +++ llvm/test/Transforms/EarlyCSE/flags.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -early-cse -earlycse-debug-hash -S < %s | FileCheck %s -; RUN: opt -basic-aa -early-cse-memssa -S < %s | FileCheck %s +; RUN: opt -early-cse -earlycse-debug-hash -normalize-opaque-pointers -S < %s | FileCheck %s +; RUN: opt -basic-aa -early-cse-memssa -normalize-opaque-pointers -S < %s | FileCheck %s declare void @use(i1) @@ -22,9 +22,9 @@ define void @test_inbounds_program_ub_if_first_gep_poison(i8* %ptr, i64 %n) { ; CHECK-LABEL: @test_inbounds_program_ub_if_first_gep_poison( -; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[N:%.*]] -; CHECK-NEXT: call void @use.i8(i8* noundef [[ADD_PTR_1]]) -; CHECK-NEXT: call void @use.i8(i8* [[ADD_PTR_1]]) +; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[N:%.*]] +; CHECK-NEXT: 
call void @use.i8(ptr noundef [[ADD_PTR_1]]) +; CHECK-NEXT: call void @use.i8(ptr [[ADD_PTR_1]]) ; CHECK-NEXT: ret void ; %add.ptr.1 = getelementptr inbounds i8, i8* %ptr, i64 %n @@ -36,9 +36,9 @@ define void @test_inbounds_program_not_ub_if_first_gep_poison(i8* %ptr, i64 %n) { ; CHECK-LABEL: @test_inbounds_program_not_ub_if_first_gep_poison( -; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 [[N:%.*]] -; CHECK-NEXT: call void @use.i8(i8* [[ADD_PTR_1]]) -; CHECK-NEXT: call void @use.i8(i8* [[ADD_PTR_1]]) +; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[N:%.*]] +; CHECK-NEXT: call void @use.i8(ptr [[ADD_PTR_1]]) +; CHECK-NEXT: call void @use.i8(ptr [[ADD_PTR_1]]) ; CHECK-NEXT: ret void ; %add.ptr.1 = getelementptr inbounds i8, i8* %ptr, i64 %n Index: llvm/test/Transforms/EarlyCSE/floatingpoint.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/floatingpoint.ll +++ llvm/test/Transforms/EarlyCSE/floatingpoint.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s -; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s +; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s ; Ensure we don't simplify away additions vectors of +0.0's (same as scalars). 
define <4 x float> @fV( <4 x float> %a) { @@ -24,8 +24,8 @@ define void @fX(<4 x float> *%p, <4 x float> %a) { ; CHECK-LABEL: @fX( ; CHECK-NEXT: [[X:%.*]] = fneg <4 x float> [[A:%.*]] -; CHECK-NEXT: store volatile <4 x float> [[X]], <4 x float>* [[P:%.*]], align 16 -; CHECK-NEXT: store volatile <4 x float> [[X]], <4 x float>* [[P]], align 16 +; CHECK-NEXT: store volatile <4 x float> [[X]], ptr [[P:%.*]], align 16 +; CHECK-NEXT: store volatile <4 x float> [[X]], ptr [[P]], align 16 ; CHECK-NEXT: ret void ; %x = fneg <4 x float> %a Index: llvm/test/Transforms/EarlyCSE/guards.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/guards.ll +++ llvm/test/Transforms/EarlyCSE/guards.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME -; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s --check-prefixes=CHECK,NO_ASSUME -; RUN: opt < %s -S -basic-aa -early-cse-memssa --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME +; RUN: opt -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME +; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,NO_ASSUME +; RUN: opt < %s -S -basic-aa -early-cse-memssa --enable-knowledge-retention -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,USE_ASSUME declare void @llvm.experimental.guard(i1,...) @@ -11,14 +11,14 @@ ; We can do store to load forwarding over a guard, since it does not ; clobber memory ; NO_ASSUME-LABEL: @test0( -; NO_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]], align 4 +; NO_ASSUME-NEXT: store i32 40, ptr [[PTR:%.*]], align 4 ; NO_ASSUME-NEXT: call void (i1, ...) 
@llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ] ; NO_ASSUME-NEXT: ret i32 40 ; ; USE_ASSUME-LABEL: @test0( -; USE_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]], align 4 +; USE_ASSUME-NEXT: store i32 40, ptr [[PTR:%.*]], align 4 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ] -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 40 ; @@ -31,14 +31,14 @@ define i32 @test1(i32* %val, i1 %cond) { ; We can CSE loads over a guard, since it does not clobber memory ; NO_ASSUME-LABEL: @test1( -; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4 +; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[VAL:%.*]], align 4 ; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ] ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: @test1( -; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4 +; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[VAL:%.*]], align 4 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ] -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]), "align"(i32* [[VAL]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[VAL]], i64 4), "nonnull"(ptr [[VAL]]), "align"(ptr [[VAL]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; @@ -185,9 +185,9 @@ ; Guard intrinsics do _read_ memory, so th call to guard below needs ; to see the store of 500 to %ptr ; CHECK-LABEL: @test6( -; CHECK-NEXT: store i32 500, i32* [[PTR:%.*]], align 4 +; CHECK-NEXT: store i32 500, ptr [[PTR:%.*]], align 4 ; CHECK-NEXT: call void (i1, ...) 
@llvm.experimental.guard(i1 [[C:%.*]]) [ "deopt"() ] -; CHECK-NEXT: store i32 600, i32* [[PTR]], align 4 +; CHECK-NEXT: store i32 600, ptr [[PTR]], align 4 ; CHECK-NEXT: ret void ; @@ -219,17 +219,17 @@ ; block in case when the condition is not recalculated. ; NO_ASSUME-LABEL: @test08( ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] -; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; NO_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4 +; NO_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4 ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @test08( ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] -; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; USE_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] -; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: ret void ; @@ -251,15 +251,15 @@ ; NO_ASSUME-LABEL: @test09( ; NO_ASSUME-NEXT: entry: ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] -; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; NO_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; NO_ASSUME-NEXT: call void (i1, ...) 
@llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4 +; NO_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4 ; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]] ; NO_ASSUME: if.true: -; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4 +; NO_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4 ; NO_ASSUME-NEXT: br label [[MERGE:%.*]] ; NO_ASSUME: if.false: -; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4 +; NO_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4 ; NO_ASSUME-NEXT: br label [[MERGE]] ; NO_ASSUME: merge: ; NO_ASSUME-NEXT: ret void @@ -267,16 +267,16 @@ ; USE_ASSUME-LABEL: @test09( ; USE_ASSUME-NEXT: entry: ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] -; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; USE_ASSUME-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] -; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]] ; USE_ASSUME: if.true: -; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: br label [[MERGE:%.*]] ; USE_ASSUME: if.false: -; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: br label [[MERGE]] ; USE_ASSUME: merge: ; USE_ASSUME-NEXT: ret void @@ -315,15 +315,15 @@ ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]] ; CHECK: if.true: 
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; CHECK-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; CHECK-NEXT: br label [[MERGE:%.*]] ; CHECK: if.false: -; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4 +; CHECK-NEXT: store i32 200, ptr [[PTR]], align 4 ; CHECK-NEXT: br label [[MERGE]] ; CHECK: merge: -; CHECK-NEXT: store i32 300, i32* [[PTR]], align 4 +; CHECK-NEXT: store i32 300, ptr [[PTR]], align 4 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; CHECK-NEXT: store i32 400, i32* [[PTR]], align 4 +; CHECK-NEXT: store i32 400, ptr [[PTR]], align 4 ; CHECK-NEXT: ret void ; @@ -401,14 +401,14 @@ ; NO_ASSUME-LABEL: @test13( ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] ; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]]) -; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]], align 4 +; NO_ASSUME-NEXT: store i32 400, ptr [[PTR:%.*]], align 4 ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @test13( ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] ; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] -; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR:%.*]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: ret void ; @@ -432,13 +432,13 @@ ; NO_ASSUME-NEXT: entry: ; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] ; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]]) -; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]], align 4 +; NO_ASSUME-NEXT: store i32 400, ptr [[PTR:%.*]], align 4 ; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]] ; 
NO_ASSUME: if.true: -; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4 +; NO_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4 ; NO_ASSUME-NEXT: br label [[MERGE:%.*]] ; NO_ASSUME: if.false: -; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4 +; NO_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4 ; NO_ASSUME-NEXT: br label [[MERGE]] ; NO_ASSUME: merge: ; NO_ASSUME-NEXT: ret void @@ -447,14 +447,14 @@ ; USE_ASSUME-NEXT: entry: ; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] ; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] -; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR:%.*]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: store i32 400, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]] ; USE_ASSUME: if.true: -; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: store i32 500, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: br label [[MERGE:%.*]] ; USE_ASSUME: if.false: -; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]], align 4 +; USE_ASSUME-NEXT: store i32 600, ptr [[PTR]], align 4 ; USE_ASSUME-NEXT: br label [[MERGE]] ; USE_ASSUME: merge: ; USE_ASSUME-NEXT: ret void @@ -494,15 +494,15 @@ ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]] ; CHECK: if.true: ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; CHECK-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; CHECK-NEXT: br label [[MERGE:%.*]] ; CHECK: if.false: -; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4 +; CHECK-NEXT: store i32 200, ptr [[PTR]], align 4 ; CHECK-NEXT: br label [[MERGE]] ; CHECK: merge: -; CHECK-NEXT: store i32 300, i32* [[PTR]], align 4 +; 
CHECK-NEXT: store i32 300, ptr [[PTR]], align 4 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ] -; CHECK-NEXT: store i32 400, i32* [[PTR]], align 4 +; CHECK-NEXT: store i32 400, ptr [[PTR]], align 4 ; CHECK-NEXT: ret void ; Index: llvm/test/Transforms/EarlyCSE/invariant-loads.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/invariant-loads.ll +++ llvm/test/Transforms/EarlyCSE/invariant-loads.ll @@ -1,22 +1,22 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME -; RUN: opt -S -basic-aa -early-cse-memssa < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME -; RUN: opt -S -basic-aa -early-cse-memssa --enable-knowledge-retention < %s | FileCheck %s --check-prefixes=CHECK,USE_ASSUME +; RUN: opt -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME +; RUN: opt -S -basic-aa -early-cse-memssa -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME +; RUN: opt -S -basic-aa -early-cse-memssa --enable-knowledge-retention -normalize-opaque-pointers < %s | FileCheck %s --check-prefixes=CHECK,USE_ASSUME declare void @clobber_and_use(i32) define void @f_0(i32* %ptr) { ; NO_ASSUME-LABEL: @f_0( -; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @f_0( -; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; 
USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; USE_ASSUME-NEXT: ret void @@ -34,15 +34,15 @@ define void @f_1(i32* %ptr) { ; We can forward invariant loads to non-invariant loads. ; NO_ASSUME-LABEL: @f_1( -; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @f_1( -; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; USE_ASSUME-NEXT: ret void ; @@ -57,15 +57,15 @@ define void @f_2(i32* %ptr) { ; We can forward a non-invariant load into an invariant load. 
; NO_ASSUME-LABEL: @f_2( -; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4 +; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @f_2( -; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4 +; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; USE_ASSUME-NEXT: ret void ; @@ -79,7 +79,7 @@ define void @f_3(i1 %cond, i32* %ptr) { ; NO_ASSUME-LABEL: @f_3( -; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; NO_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]] ; NO_ASSUME: left: @@ -89,11 +89,11 @@ ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @f_3( -; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; USE_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]] ; USE_ASSUME: left: -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 
4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ] ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; USE_ASSUME-NEXT: ret void ; USE_ASSUME: right: @@ -119,11 +119,11 @@ ; CHECK-LABEL: @f_4( ; CHECK-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[MERGE:%.*]] ; CHECK: left: -; CHECK-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0 +; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0 ; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL0]]) ; CHECK-NEXT: br label [[MERGE]] ; CHECK: merge: -; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[PTR]], align 4 +; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr [[PTR]], align 4 ; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL1]]) ; CHECK-NEXT: ret void ; @@ -148,14 +148,14 @@ ; to restore the same unchanging value. define void @test_dse1(i32* %p) { ; NO_ASSUME-LABEL: @test_dse1( -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0 +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @test_dse1( -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0 +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret void ; %v1 = load i32, i32* %p, !invariant.load !{} @@ -167,9 +167,9 @@ ; By assumption, v1 must equal v2 (TODO) define void @test_false_negative_dse2(i32* %p, i32 %v2) { ; CHECK-LABEL: @test_false_negative_dse2( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], 
align 4, !invariant.load !0 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0 ; CHECK-NEXT: call void @clobber_and_use(i32 [[V1]]) -; CHECK-NEXT: store i32 [[V2:%.*]], i32* [[P]], align 4 +; CHECK-NEXT: store i32 [[V2:%.*]], ptr [[P]], align 4 ; CHECK-NEXT: ret void ; %v1 = load i32, i32* %p, !invariant.load !{} @@ -182,15 +182,15 @@ ; it lets us remove later loads not explicitly marked invariant define void @test_scope_start_without_load(i32* %p) { ; NO_ASSUME-LABEL: @test_scope_start_without_load( -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 ; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]] ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]]) ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: @test_scope_start_without_load( -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4 -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4 +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]] ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]]) ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) @@ -210,7 +210,7 @@ ; load define void @test_scope_restart(i32* %p) { ; NO_ASSUME-LABEL: @test_scope_restart( -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0 +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0 ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) ; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]] ; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]]) @@ -218,9 +218,9 @@ ; NO_ASSUME-NEXT: 
ret void ; ; USE_ASSUME-LABEL: @test_scope_restart( -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0 +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0 ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]] ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]]) ; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]]) Index: llvm/test/Transforms/EarlyCSE/invariant.start.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/invariant.start.ll +++ llvm/test/Transforms/EarlyCSE/invariant.start.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature -; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s --check-prefixes=CHECK,NO_ASSUME -; RUN: opt < %s -S -early-cse --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME -; RUN: opt < %s -S -passes=early-cse | FileCheck %s --check-prefixes=CHECK,NO_ASSUME +; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,NO_ASSUME +; RUN: opt < %s -S -early-cse --enable-knowledge-retention -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,USE_ASSUME +; RUN: opt < %s -S -passes=early-cse -normalize-opaque-pointers | FileCheck %s --check-prefixes=CHECK,NO_ASSUME declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind @@ -10,16 +10,16 @@ ; clobber memory define i8 @test_bypass1(i8 *%P) { ; 
NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass1 -; NO_ASSUME-SAME: (i8* [[P:%.*]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]], align 1 -; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, ptr [[P]], align 1 +; NO_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) ; NO_ASSUME-NEXT: ret i8 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass1 -; USE_ASSUME-SAME: (i8* [[P:%.*]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]], align 1 -; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ] +; USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, ptr [[P]], align 1 +; USE_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ] ; USE_ASSUME-NEXT: ret i8 0 ; @@ -34,16 +34,16 @@ ; Trivial Store->load forwarding over invariant.start define i8 @test_bypass2(i8 *%P) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass2 -; NO_ASSUME-SAME: (i8* [[P:%.*]]) -; NO_ASSUME-NEXT: store i8 42, i8* [[P]], align 1 -; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: store i8 42, ptr [[P]], align 1 +; NO_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) ; NO_ASSUME-NEXT: ret i8 42 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass2 -; USE_ASSUME-SAME: (i8* [[P:%.*]]) -; USE_ASSUME-NEXT: store i8 42, i8* [[P]], align 1 -; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ] +; USE_ASSUME-SAME: 
(ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: store i8 42, ptr [[P]], align 1 +; USE_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ] ; USE_ASSUME-NEXT: ret i8 42 ; @@ -58,16 +58,16 @@ ; of invariant.start. define void @test_bypass3(i8* %P) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass3 -; NO_ASSUME-SAME: (i8* [[P:%.*]]) -; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) -; NO_ASSUME-NEXT: store i8 60, i8* [[P]], align 1 +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) +; NO_ASSUME-NEXT: store i8 60, ptr [[P]], align 1 ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass3 -; USE_ASSUME-SAME: (i8* [[P:%.*]]) -; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ] -; USE_ASSUME-NEXT: store i8 60, i8* [[P]], align 1 +; USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ] +; USE_ASSUME-NEXT: store i8 60, ptr [[P]], align 1 ; USE_ASSUME-NEXT: ret void ; @@ -82,11 +82,11 @@ ; the invariant region, between start and end. 
define void @test_bypass4(i8* %P) { ; CHECK-LABEL: define {{[^@]+}}@test_bypass4 -; CHECK-SAME: (i8* [[P:%.*]]) -; CHECK-NEXT: store i8 50, i8* [[P]], align 1 -; CHECK-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]]) -; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* [[I]], i64 1, i8* [[P]]) -; CHECK-NEXT: store i8 60, i8* [[P]], align 1 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: store i8 50, ptr [[P]], align 1 +; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]]) +; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[I]], i64 1, ptr [[P]]) +; CHECK-NEXT: store i8 60, ptr [[P]], align 1 ; CHECK-NEXT: ret void ; @@ -105,18 +105,18 @@ define i32 @test_before_load(i32* %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_load -; NO_ASSUME-SAME: (i32* [[P:%.*]]) -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; NO_ASSUME-NEXT: call void @clobber() ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_load -; USE_ASSUME-SAME: (i32* [[P:%.*]]) -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; 
USE_ASSUME-NEXT: ret i32 0 ; call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p) @@ -129,18 +129,18 @@ define i32 @test_before_clobber(i32* %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber -; NO_ASSUME-SAME: (i32* [[P:%.*]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: call void @clobber() ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber -; USE_ASSUME-SAME: (i32* [[P:%.*]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; %v1 = load i32, i32* %p @@ -153,20 +153,20 @@ define i32 @test_duplicate_scope(i32* %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope -; NO_ASSUME-SAME: (i32* [[P:%.*]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: call void @clobber() -; 
NO_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-NEXT: [[TMP2:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope -; USE_ASSUME-SAME: (i32* [[P:%.*]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: [[TMP2:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; %v1 = load i32, i32* %p @@ -180,20 +180,20 @@ define i32 @test_unanalzyable_load(i32* %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load -; NO_ASSUME-SAME: (i32* [[P:%.*]]) -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: call void @clobber() -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; NO_ASSUME-NEXT: call void @clobber() ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load -; USE_ASSUME-SAME: (i32* [[P:%.*]]) -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; 
USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p) @@ -207,11 +207,11 @@ define i32 @test_negative_after_clobber(i32* %p) { ; CHECK-LABEL: define {{[^@]+}}@test_negative_after_clobber -; CHECK-SAME: (i32* [[P:%.*]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: call void @clobber() -; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] ; @@ -225,9 +225,9 @@ define i32 @test_merge(i32* %p, i1 %cnd) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge -; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]] ; 
NO_ASSUME: taken: ; NO_ASSUME-NEXT: call void @clobber() @@ -236,15 +236,15 @@ ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge -; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]] ; USE_ASSUME: taken: ; USE_ASSUME-NEXT: call void @clobber() ; USE_ASSUME-NEXT: br label [[MERGE]] ; USE_ASSUME: merge: -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; %v1 = load i32, i32* %p @@ -262,15 +262,15 @@ define i32 @test_negative_after_mergeclobber(i32* %p, i1 %cnd) { ; CHECK-LABEL: define {{[^@]+}}@test_negative_after_mergeclobber -; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]] ; CHECK: taken: ; CHECK-NEXT: call void @clobber() ; CHECK-NEXT: br label [[MERGE]] ; CHECK: merge: -; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] 
; @@ -291,15 +291,15 @@ ; merging facts along distinct paths. define i32 @test_false_negative_merge(i32* %p, i1 %cnd) { ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_merge -; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]] ; CHECK: taken: -; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; CHECK-NEXT: call void @clobber() ; CHECK-NEXT: br label [[MERGE]] ; CHECK: merge: -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] ; @@ -318,10 +318,10 @@ define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load -; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: call void @clobber() -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]] ; NO_ASSUME: taken: ; NO_ASSUME-NEXT: call void @clobber() @@ -330,16 +330,16 @@ ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load -; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; USE_ASSUME-NEXT: [[TMP1:%.*]] 
= call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]] ; USE_ASSUME: taken: ; USE_ASSUME-NEXT: call void @clobber() ; USE_ASSUME-NEXT: br label [[MERGE]] ; USE_ASSUME: merge: -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p) @@ -358,18 +358,18 @@ define void @test_dse_before_load(i32* %p, i1 %cnd) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load -; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; NO_ASSUME-NEXT: call void @clobber() ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load -; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), 
"nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret void ; call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p) @@ -381,18 +381,18 @@ define void @test_dse_after_load(i32* %p, i1 %cnd) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load -; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; NO_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; NO_ASSUME-NEXT: call void @clobber() ; NO_ASSUME-NEXT: ret void ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load -; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 -; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) +; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]]) { +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 +; USE_ASSUME-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret void ; %v1 = load i32, i32* %p @@ -408,12 +408,11 @@ ; passes will canonicalize away the bitcasts in this example. 
define i32 @test_false_negative_types(i32* %p) { ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_types -; CHECK-SAME: (i32* [[P:%.*]]) -; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: call void @clobber() -; CHECK-NEXT: [[PF:%.*]] = bitcast i32* [[P]] to float* -; CHECK-NEXT: [[V2F:%.*]] = load float, float* [[PF]], align 4 +; CHECK-NEXT: [[V2F:%.*]] = load float, ptr [[P]], align 4 ; CHECK-NEXT: [[V2:%.*]] = bitcast float [[V2F]] to i32 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] @@ -430,11 +429,11 @@ define i32 @test_negative_size1(i32* %p) { ; CHECK-LABEL: define {{[^@]+}}@test_negative_size1 -; CHECK-SAME: (i32* [[P:%.*]]) -; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 3, i32* [[P]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 3, ptr [[P]]) +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: call void @clobber() -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] ; @@ -448,11 +447,11 @@ define i32 @test_negative_size2(i32* %p) { ; CHECK-LABEL: define {{[^@]+}}@test_negative_size2 -; CHECK-SAME: (i32* [[P:%.*]]) -; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 0, i32* [[P]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 0, ptr [[P]]) +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: call void 
@clobber() -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] ; @@ -466,12 +465,12 @@ define i32 @test_negative_scope(i32* %p) { ; CHECK-LABEL: define {{[^@]+}}@test_negative_scope -; CHECK-SAME: (i32* [[P:%.*]]) -; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[SCOPE:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[SCOPE]], i64 4, ptr [[P]]) +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: call void @clobber() -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] ; @@ -486,12 +485,12 @@ define i32 @test_false_negative_scope(i32* %p) { ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_scope -; CHECK-SAME: (i32* [[P:%.*]]) -; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]]) -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4 +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[SCOPE:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]]) +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: call void @clobber() -; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4 -; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]]) +; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[SCOPE]], i64 4, ptr [[P]]) ; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[SUB]] ; @@ -507,16 +506,16 @@ ; Invariant load defact 
starts an invariant.start scope of the appropriate size define i32 @test_invariant_load_scope(i32* %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope -; NO_ASSUME-SAME: (i32* [[P:%.*]]) -; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !0 +; NO_ASSUME-SAME: (ptr [[P:%.*]]) { +; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4, !invariant.load !0 ; NO_ASSUME-NEXT: call void @clobber() ; NO_ASSUME-NEXT: ret i32 0 ; ; USE_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope -; USE_ASSUME-SAME: (i32* [[P:%.*]]) -; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !0 +; USE_ASSUME-SAME: (ptr [[P:%.*]]) { +; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, ptr [[P]], align 4, !invariant.load !0 ; USE_ASSUME-NEXT: call void @clobber() -; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ] +; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; %v1 = load i32, i32* %p, !invariant.load !{} Index: llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll +++ llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -early-cse < %s | FileCheck %s +; RUN: opt -S -early-cse -normalize-opaque-pointers < %s | FileCheck %s ; Unequal mask check. @@ -12,7 +12,7 @@ ; Expect the second load to be removed. 
define <4 x i32> @f3(<4 x i32>* %a0, <4 x i32> %a1) { ; CHECK-LABEL: @f3( -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) ; CHECK-NEXT: [[V2:%.*]] = add <4 x i32> [[V0]], [[V0]] ; CHECK-NEXT: ret <4 x i32> [[V2]] ; @@ -26,8 +26,8 @@ ; Expect the second load to remain. define <4 x i32> @f4(<4 x i32>* %a0, <4 x i32> %a1) { ; CHECK-LABEL: @f4( -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) -; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) +; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer) ; CHECK-NEXT: [[V2:%.*]] = add <4 x i32> [[V0]], [[V1]] ; CHECK-NEXT: ret <4 x i32> [[V2]] ; @@ -41,8 +41,8 @@ ; Expect the second load to remain. define <4 x i32> @f5(<4 x i32>* %a0, <4 x i32> %a1) { ; CHECK-LABEL: @f5( -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) -; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) +; CHECK-NEXT: [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0]], i32 4, <4 x i1> , <4 x i32> zeroinitializer) ; CHECK-NEXT: [[V2:%.*]] = add <4 x i32> [[V0]], [[V1]] ; CHECK-NEXT: ret <4 x i32> [[V2]] ; @@ -59,7 +59,7 @@ ; Expect the first store to be removed. 
define void @f6(<4 x i32> %a0, <4 x i32>* %a1) { ; CHECK-LABEL: @f6( -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> ) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> ) ; CHECK-NEXT: ret void ; call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> ) @@ -71,8 +71,8 @@ ; Expect both stores to remain. define void @f7(<4 x i32> %a0, <4 x i32>* %a1) { ; CHECK-LABEL: @f7( -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> ) -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0]], <4 x i32>* [[A1]], i32 4, <4 x i1> ) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> ) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0]], ptr [[A1]], i32 4, <4 x i1> ) ; CHECK-NEXT: ret void ; call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> ) @@ -87,7 +87,7 @@ ; Expect the store to be removed. define <4 x i32> @f8(<4 x i32>* %a0, <4 x i32> %a1) { ; CHECK-LABEL: @f8( -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) ; CHECK-NEXT: ret <4 x i32> [[V0]] ; %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> , <4 x i32> %a1) @@ -99,8 +99,8 @@ ; Expect the store to remain. 
define <4 x i32> @f9(<4 x i32>* %a0, <4 x i32> %a1) { ; CHECK-LABEL: @f9( -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V0]], <4 x i32>* [[A0]], i32 4, <4 x i1> ) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> , <4 x i32> [[A1:%.*]]) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V0]], ptr [[A0]], i32 4, <4 x i1> ) ; CHECK-NEXT: ret <4 x i32> [[V0]] ; %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> , <4 x i32> %a1) @@ -115,7 +115,7 @@ ; Expect the load to be removed. define <4 x i32> @fa(<4 x i32> %a0, <4 x i32>* %a1) { ; CHECK-LABEL: @fa( -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> ) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> ) ; CHECK-NEXT: ret <4 x i32> [[A0]] ; call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> ) @@ -127,8 +127,8 @@ ; Expect the load to remain. define <4 x i32> @fb(<4 x i32> %a0, <4 x i32>* %a1) { ; CHECK-LABEL: @fb( -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> ) -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A1]], i32 4, <4 x i1> , <4 x i32> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> ) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A1]], i32 4, <4 x i1> , <4 x i32> zeroinitializer) ; CHECK-NEXT: ret <4 x i32> [[V0]] ; call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> ) @@ -140,8 +140,8 @@ ; Expect the load to remain. 
define <4 x i32> @fc(<4 x i32> %a0, <4 x i32>* %a1) { ; CHECK-LABEL: @fc( -; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> ) -; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A1]], i32 4, <4 x i1> , <4 x i32> undef) +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> ) +; CHECK-NEXT: [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A1]], i32 4, <4 x i1> , <4 x i32> undef) ; CHECK-NEXT: ret <4 x i32> [[V0]] ; call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> ) Index: llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll +++ llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -early-cse < %s | FileCheck %s +; RUN: opt -S -early-cse -normalize-opaque-pointers < %s | FileCheck %s define <128 x i8> @f0(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) { ; CHECK-LABEL: @f0( ; CHECK-NEXT: [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]] -; CHECK-NEXT: call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[A1]], <128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]]) +; CHECK-NEXT: call void @llvm.masked.store.v128i8.p0(<128 x i8> [[A1]], ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]]) ; CHECK-NEXT: ret <128 x i8> [[A1]] ; %v0 = icmp eq <128 x i8> %a1, %a2 @@ -16,7 +16,7 @@ define <128 x i8> @f1(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) { ; CHECK-LABEL: @f1( ; CHECK-NEXT: [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]] -; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef) +; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> 
@llvm.masked.load.v128i8.p0(ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef) ; CHECK-NEXT: ret <128 x i8> [[V1]] ; %v0 = icmp eq <128 x i8> %a1, %a2 @@ -28,7 +28,7 @@ define <128 x i8> @f2(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) { ; CHECK-LABEL: @f2( ; CHECK-NEXT: [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]] -; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef) +; CHECK-NEXT: [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef) ; CHECK-NEXT: [[V3:%.*]] = add <128 x i8> [[V1]], [[V1]] ; CHECK-NEXT: ret <128 x i8> [[V3]] ; Index: llvm/test/Transforms/EarlyCSE/memoryssa.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/memoryssa.ll +++ llvm/test/Transforms/EarlyCSE/memoryssa.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s --check-prefix=CHECK-NOMEMSSA -; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s -; RUN: opt < %s -S -passes='early-cse' | FileCheck %s --check-prefix=CHECK-NOMEMSSA -; RUN: opt < %s -S -aa-pipeline=basic-aa -passes='early-cse' | FileCheck %s +; RUN: opt < %s -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s --check-prefix=CHECK-NOMEMSSA +; RUN: opt < %s -S -basic-aa -early-cse-memssa -normalize-opaque-pointers | FileCheck %s +; RUN: opt < %s -S -passes='early-cse' -normalize-opaque-pointers | FileCheck %s --check-prefix=CHECK-NOMEMSSA +; RUN: opt < %s -S -aa-pipeline=basic-aa -passes='early-cse' -normalize-opaque-pointers | FileCheck %s @G1 = global i32 zeroinitializer @G2 = global i32 zeroinitializer @@ -11,15 +11,15 @@ ;; Simple load value numbering across non-clobbering store. 
define i32 @test1() { ; CHECK-NOMEMSSA-LABEL: @test1( -; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NOMEMSSA-NEXT: store i32 0, i32* @G2, align 4 -; CHECK-NOMEMSSA-NEXT: [[V2:%.*]] = load i32, i32* @G1, align 4 +; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 +; CHECK-NOMEMSSA-NEXT: store i32 0, ptr @G2, align 4 +; CHECK-NOMEMSSA-NEXT: [[V2:%.*]] = load i32, ptr @G1, align 4 ; CHECK-NOMEMSSA-NEXT: [[DIFF:%.*]] = sub i32 [[V1]], [[V2]] ; CHECK-NOMEMSSA-NEXT: ret i32 [[DIFF]] ; ; CHECK-LABEL: @test1( -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NEXT: store i32 0, i32* @G2, align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 +; CHECK-NEXT: store i32 0, ptr @G2, align 4 ; CHECK-NEXT: ret i32 0 ; %V1 = load i32, i32* @G1 @@ -33,15 +33,15 @@ define void @test2() { ; CHECK-NOMEMSSA-LABEL: @test2( ; CHECK-NOMEMSSA-NEXT: entry: -; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NOMEMSSA-NEXT: store i32 0, i32* @G2, align 4 -; CHECK-NOMEMSSA-NEXT: store i32 [[V1]], i32* @G1, align 4 +; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 +; CHECK-NOMEMSSA-NEXT: store i32 0, ptr @G2, align 4 +; CHECK-NOMEMSSA-NEXT: store i32 [[V1]], ptr @G1, align 4 ; CHECK-NOMEMSSA-NEXT: ret void ; ; CHECK-LABEL: @test2( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NEXT: store i32 0, i32* @G2, align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 +; CHECK-NEXT: store i32 0, ptr @G2, align 4 ; CHECK-NEXT: ret void ; entry: @@ -56,27 +56,27 @@ define void @test_memphiopt(i1 %c, i32* %p) { ; CHECK-NOMEMSSA-LABEL: @test_memphiopt( ; CHECK-NOMEMSSA-NEXT: entry: -; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 +; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 ; CHECK-NOMEMSSA-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]] ; CHECK-NOMEMSSA: then: -; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, i32* 
[[P:%.*]], align 4 +; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NOMEMSSA-NEXT: br label [[END]] ; CHECK-NOMEMSSA: end: -; CHECK-NOMEMSSA-NEXT: [[V2:%.*]] = load i32, i32* @G1, align 4 +; CHECK-NOMEMSSA-NEXT: [[V2:%.*]] = load i32, ptr @G1, align 4 ; CHECK-NOMEMSSA-NEXT: [[SUM:%.*]] = add i32 [[V1]], [[V2]] -; CHECK-NOMEMSSA-NEXT: store i32 [[SUM]], i32* @G2, align 4 +; CHECK-NOMEMSSA-NEXT: store i32 [[SUM]], ptr @G2, align 4 ; CHECK-NOMEMSSA-NEXT: ret void ; ; CHECK-LABEL: @test_memphiopt( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 ; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]] ; CHECK: then: -; CHECK-NEXT: [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: br label [[END]] ; CHECK: end: ; CHECK-NEXT: [[SUM:%.*]] = add i32 [[V1]], [[V1]] -; CHECK-NEXT: store i32 [[SUM]], i32* @G2, align 4 +; CHECK-NEXT: store i32 [[SUM]], ptr @G2, align 4 ; CHECK-NEXT: ret void ; entry: @@ -101,27 +101,27 @@ define void @test_memphiopt2(i1 %c, i32* %p) { ; CHECK-NOMEMSSA-LABEL: @test_memphiopt2( ; CHECK-NOMEMSSA-NEXT: entry: -; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NOMEMSSA-NEXT: store i32 [[V1]], i32* @G2, align 4 +; CHECK-NOMEMSSA-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 +; CHECK-NOMEMSSA-NEXT: store i32 [[V1]], ptr @G2, align 4 ; CHECK-NOMEMSSA-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]] ; CHECK-NOMEMSSA: then: -; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NOMEMSSA-NEXT: br label [[END]] ; CHECK-NOMEMSSA: end: -; CHECK-NOMEMSSA-NEXT: [[V2:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NOMEMSSA-NEXT: store i32 [[V2]], i32* @G3, align 4 +; CHECK-NOMEMSSA-NEXT: [[V2:%.*]] = load i32, ptr @G1, align 4 +; 
CHECK-NOMEMSSA-NEXT: store i32 [[V2]], ptr @G3, align 4 ; CHECK-NOMEMSSA-NEXT: ret void ; ; CHECK-LABEL: @test_memphiopt2( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[V1:%.*]] = load i32, i32* @G1, align 4 -; CHECK-NEXT: store i32 [[V1]], i32* @G2, align 4 +; CHECK-NEXT: [[V1:%.*]] = load i32, ptr @G1, align 4 +; CHECK-NEXT: store i32 [[V1]], ptr @G2, align 4 ; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]] ; CHECK: then: -; CHECK-NEXT: [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: br label [[END]] ; CHECK: end: -; CHECK-NEXT: store i32 [[V1]], i32* @G3, align 4 +; CHECK-NEXT: store i32 [[V1]], ptr @G3, align 4 ; CHECK-NEXT: ret void ; entry: @@ -145,24 +145,24 @@ define void @test_writeback_lifetimes(i32* %p) { ; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes( ; CHECK-NOMEMSSA-NEXT: entry: -; CHECK-NOMEMSSA-NEXT: [[Q:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 1 -; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, i32* [[P]], align 4 -; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, i32* [[Q]], align 4 -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]]) -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]]) -; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], i32* [[P]], align 4 -; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], i32* [[Q]], align 4 +; CHECK-NOMEMSSA-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 1 +; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4 +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4 +; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NOMEMSSA-NEXT: ret void ; ; CHECK-LABEL: @test_writeback_lifetimes( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[Q:%.*]] = 
getelementptr i32, i32* [[P:%.*]], i64 1 -; CHECK-NEXT: [[PV:%.*]] = load i32, i32* [[P]], align 4 -; CHECK-NEXT: [[QV:%.*]] = load i32, i32* [[Q]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]]) -; CHECK-NEXT: store i32 [[PV]], i32* [[P]], align 4 -; CHECK-NEXT: store i32 [[QV]], i32* [[Q]], align 4 +; CHECK-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 1 +; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4 +; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4 +; CHECK-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -181,22 +181,22 @@ define void @test_writeback_lifetimes_multi_arg(i32* %p, i32* %q) { ; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg( ; CHECK-NOMEMSSA-NEXT: entry: -; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4 -; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, i32* [[Q:%.*]], align 4 -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]]) -; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]]) -; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], i32* [[P]], align 4 -; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], i32* [[Q]], align 4 +; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4 +; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4 +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4 +; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NOMEMSSA-NEXT: ret void ; ; CHECK-LABEL: @test_writeback_lifetimes_multi_arg( ; CHECK-NEXT: entry: -; CHECK-NEXT: 
[[PV:%.*]] = load i32, i32* [[P:%.*]], align 4 -; CHECK-NEXT: [[QV:%.*]] = load i32, i32* [[Q:%.*]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]]) -; CHECK-NEXT: store i32 [[PV]], i32* [[P]], align 4 -; CHECK-NEXT: store i32 [[QV]], i32* [[Q]], align 4 +; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4 +; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4 +; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]]) +; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4 +; CHECK-NEXT: store i32 [[QV]], ptr [[Q]], align 4 ; CHECK-NEXT: ret void ; entry: Index: llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll +++ llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S < %s -early-cse -earlycse-debug-hash | FileCheck %s +; RUN: opt -S < %s -early-cse -earlycse-debug-hash -normalize-opaque-pointers | FileCheck %s ; Store-to-load forwarding across a @llvm.experimental.noalias.scope.decl. 
define float @s2l(float* %p) { ; CHECK-LABEL: @s2l( -; CHECK-NEXT: store float 0.000000e+00, float* [[P:%.*]], align 4 -; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0) +; CHECK-NEXT: store float 0.000000e+00, ptr [[P:%.*]], align 4 +; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]]) ; CHECK-NEXT: ret float 0.000000e+00 ; store float 0.0, float* %p @@ -19,8 +19,8 @@ define float @rle(float* %p) { ; CHECK-LABEL: @rle( -; CHECK-NEXT: [[R:%.*]] = load float, float* [[P:%.*]], align 4 -; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata !0) +; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P:%.*]], align 4 +; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0]]) ; CHECK-NEXT: [[T:%.*]] = fadd float [[R]], [[R]] ; CHECK-NEXT: ret float [[T]] ; Index: llvm/test/Transforms/EarlyCSE/phi.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/phi.ll +++ llvm/test/Transforms/EarlyCSE/phi.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -early-cse -earlycse-debug-hash -S < %s | FileCheck %s -; RUN: opt -basic-aa -early-cse-memssa -S < %s | FileCheck %s +; RUN: opt -early-cse -earlycse-debug-hash -normalize-opaque-pointers -S < %s | FileCheck %s +; RUN: opt -basic-aa -early-cse-memssa -normalize-opaque-pointers -S < %s | FileCheck %s ; Most basic case, fully identical PHI nodes define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) { @@ -14,8 +14,8 @@ ; CHECK: end: ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 ; CHECK-NEXT: ret 
void ; entry: @@ -47,8 +47,8 @@ ; CHECK: end: ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -80,8 +80,8 @@ ; CHECK: end: ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V2:%.*]], [[B1]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -111,8 +111,8 @@ ; CHECK: end: ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V2:%.*]], [[B1]] ], [ [[V0]], [[B0]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -142,8 +142,8 @@ ; CHECK: end: ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -174,7 +174,7 @@ ; CHECK-NEXT: br label [[END]] ; CHECK: end: ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] -; CHECK-NEXT: store i32 
[[I1]], i32* [[D1:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -203,7 +203,7 @@ ; CHECK-NEXT: br label [[END]] ; CHECK: end: ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 ; CHECK-NEXT: ret void ; entry: @@ -235,9 +235,9 @@ ; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ] ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 -; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 +; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2 ; CHECK-NEXT: ret void ; entry: @@ -270,9 +270,9 @@ ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ] -; CHECK-NEXT: store i32 [[I0]], i32* [[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 -; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 +; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2 ; CHECK-NEXT: ret void ; entry: @@ -305,9 +305,9 @@ ; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ] ; CHECK-NEXT: [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ] ; CHECK-NEXT: [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ] -; CHECK-NEXT: store i32 [[I0]], i32* 
[[D0:%.*]], align 4 -; CHECK-NEXT: store i32 [[I1]], i32* [[D1:%.*]], align 4 -; CHECK-NEXT: store i16 [[IBAD]], i16* [[D2:%.*]], align 2 +; CHECK-NEXT: store i32 [[I0]], ptr [[D0:%.*]], align 4 +; CHECK-NEXT: store i32 [[I1]], ptr [[D1:%.*]], align 4 +; CHECK-NEXT: store i16 [[IBAD]], ptr [[D2:%.*]], align 2 ; CHECK-NEXT: ret void ; entry: Index: llvm/test/Transforms/EarlyCSE/pr33406.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/pr33406.ll +++ llvm/test/Transforms/EarlyCSE/pr33406.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -early-cse-memssa -earlycse-debug-hash -S %s | FileCheck %s +; RUN: opt -early-cse-memssa -earlycse-debug-hash -normalize-opaque-pointers -S %s | FileCheck %s @b = external global i32 @@ -8,7 +8,7 @@ ; CHECK-NEXT: for.cond: ; CHECK-NEXT: br i1 true, label [[IF_END:%.*]], label [[FOR_INC:%.*]] ; CHECK: if.end: -; CHECK-NEXT: [[TINKYWINKY:%.*]] = load i32, i32* @b, align 4 +; CHECK-NEXT: [[TINKYWINKY:%.*]] = load i32, ptr @b, align 4 ; CHECK-NEXT: br i1 true, label [[FOR_INC]], label [[FOR_INC]] ; CHECK: for.inc: ; CHECK-NEXT: ret void Index: llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll +++ llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll @@ -1,13 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s +; RUN: opt -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers < %s | FileCheck %s declare void @readnone_may_unwind() readnone define void @f(i32* %ptr) { ; CHECK-LABEL: @f( -; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]], align 4 +; CHECK-NEXT: store i32 100, ptr [[PTR:%.*]], align 4 ; CHECK-NEXT: call void @readnone_may_unwind() -; CHECK-NEXT: store i32 200, i32* [[PTR]], align 4 +; CHECK-NEXT: 
store i32 200, ptr [[PTR]], align 4 ; CHECK-NEXT: ret void ; Index: llvm/test/Transforms/EarlyCSE/writeonly.ll =================================================================== --- llvm/test/Transforms/EarlyCSE/writeonly.ll +++ llvm/test/Transforms/EarlyCSE/writeonly.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s +; RUN: opt -S -early-cse -earlycse-debug-hash -normalize-opaque-pointers < %s | FileCheck %s @var = global i32 undef declare void @foo() nounwind @@ -7,7 +7,7 @@ define void @test() { ; CHECK-LABEL: @test( ; CHECK-NEXT: call void @foo() #[[ATTR1:[0-9]+]] -; CHECK-NEXT: store i32 2, i32* @var, align 4 +; CHECK-NEXT: store i32 2, ptr @var, align 4 ; CHECK-NEXT: ret void ; store i32 1, i32* @var Index: llvm/tools/opt/NewPMDriver.cpp =================================================================== --- llvm/tools/opt/NewPMDriver.cpp +++ llvm/tools/opt/NewPMDriver.cpp @@ -19,6 +19,8 @@ #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CGSCCPassManager.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/Bitcode/BitcodeWriter.h" #include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/Config/llvm-config.h" #include "llvm/IR/Dominators.h" @@ -150,8 +152,65 @@ static cl::opt<bool> PseudoProbeForProfiling( "new-pm-pseudo-probe-for-profiling", cl::init(false), cl::Hidden, cl::desc("Emit pseudo probes to enable PGO profile generation.")); +static cl::opt<bool> NormalizeOpaquePointers( + "normalize-opaque-pointers", cl::Hidden, + cl::desc("Convert module to opaque pointers before printing")); /// @}} +static std::unique_ptr<Module> cloneModuleIntoContext( + LLVMContext &NewCtx, const Module &M, bool ShouldPreserveUseListOrder) { + SmallVector<char, 0> Buffer; + raw_svector_ostream OS(Buffer); + WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder); + MemoryBufferRef MBuf(OS.str(), "temporary bitcode"); + 
Expected<std::unique_ptr<Module>> Module = parseBitcodeFile(MBuf, NewCtx); + if (!Module) + handleAllErrors(Module.takeError()); + return std::move(*Module); +} + +static void normalizeModuleForOpaquePointers(Module &M) { + for (Function &F : M.functions()) { + for (BasicBlock &BB : F) { + for (Instruction &I : make_early_inc_range(BB)) { + // Drop no-op bitcasts from ptr to ptr, which will usually not be + // present with opaque pointers. + if (auto *BC = dyn_cast<BitCastInst>(&I)) { + if (BC->getType() == BC->getOperand(0)->getType() && + BC->getType()->isPointerTy()) { + BC->replaceAllUsesWith(BC->getOperand(0)); + BC->eraseFromParent(); + } + } + } + } + } +} + +class CustomPrintModulePass : public PassInfoMixin<CustomPrintModulePass> { + raw_ostream &OS; + bool ShouldPreserveUseListOrder; + +public: + CustomPrintModulePass(raw_ostream &OS, bool ShouldPreserveUseListOrder) + : OS(OS), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {} + + PreservedAnalyses run(Module &M, AnalysisManager<Module> &) { + if (NormalizeOpaquePointers && M.getContext().supportsTypedPointers()) { + LLVMContext OpaqueCtx; + OpaqueCtx.enableOpaquePointers(); + std::unique_ptr<Module> OpaqueM = + cloneModuleIntoContext(OpaqueCtx, M, ShouldPreserveUseListOrder); + normalizeModuleForOpaquePointers(*OpaqueM); + OpaqueM->print(OS, nullptr, ShouldPreserveUseListOrder); + } else { + M.print(OS, nullptr, ShouldPreserveUseListOrder); + } + return PreservedAnalyses::all(); + } + static bool isRequired() { return true; } +}; + template <typename PassManagerT> bool tryParsePipelineText(PassBuilder &PB, const cl::opt<std::string> &PipelineOpt) { @@ -457,7 +516,7 @@ break; // No output pass needed. case OK_OutputAssembly: MPM.addPass( - PrintModulePass(Out->os(), "", ShouldPreserveAssemblyUseListOrder)); + CustomPrintModulePass(Out->os(), ShouldPreserveAssemblyUseListOrder)); break; case OK_OutputBitcode: MPM.addPass(BitcodeWriterPass(Out->os(), ShouldPreserveBitcodeUseListOrder,