9
9
; The load below cannot be connected past the barrier: even though both
; accesses carry !invariant.group, the pointer was laundered through
; @llvm.invariant.group.barrier, so the walker must stop at the nearest
; clobbering def.
; NOTE(review): original lines 16-18 fell inside a diff-hunk gap; the
; `store i32 1, i32* @g` / bitcast below are reconstructed from the
; CHECK lines and the parallel @handleInvariantGroups body — confirm
; against the upstream test.
define i32 @foo(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, i32* %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, i32* @g, align 4

  %1 = bitcast i32* %a to i8*
  %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; This has to be MemoryUse(2), because we can't skip the barrier based on
; invariant.group.
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a32, align 4, !invariant.group !0
  ret i32 %2
}
27
29
30
; Per the CHECK lines, the !invariant.group load is tied directly to the
; initial store (MemoryUse(1)) even though the pointer goes through the
; barrier, i.e. the barrier is looked through here.
define i32 @skipBarrier(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, i32* %a, align 4, !invariant.group !0

  %1 = bitcast i32* %a to i8*
  %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; We can skip the barrier only if the "skip" is not based on !invariant.group.
; CHECK: MemoryUse(1)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a32, align 4, !invariant.group !0
  ret i32 %2
}
45
+
46
; Same as @skipBarrier, but the first access is a load (liveOnEntry), and a
; later store to an unrelated global (@g) intervenes; the final load is still
; pinned to that store (see FIXME below).
define i32 @skipBarrier2(i32* %a) {

; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %v = load i32
  %v = load i32, i32* %a, align 4, !invariant.group !0

  %1 = bitcast i32* %a to i8*
  %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; We can skip the barrier only if the "skip" is not based on !invariant.group.
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %v2 = load i32
  %v2 = load i32, i32* %a32, align 4, !invariant.group !0
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 1
  store i32 1, i32* @g, align 4

; FIXME: based on invariant.group it should be MemoryUse(liveOnEntry)
; CHECK: MemoryUse(1)
; CHECK-NEXT: %v3 = load i32
  %v3 = load i32, i32* %a32, align 4, !invariant.group !0
  %add = add nsw i32 %v2, %v3
  %add2 = add nsw i32 %add, %v
  ret i32 %add2
}
72
+
73
; Mixed case: stores to %a (in the group) and to @g interleaved with
; barrier-laundered loads; the CHECK lines pin each access's defining def.
define i32 @handleInvariantGroups(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, i32* %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, i32* @g, align 4
  %1 = bitcast i32* %a to i8*
  %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a32, align 4, !invariant.group !0

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: store i32 2
  store i32 2, i32* @g, align 4

; FIXME: This can be changed to MemoryUse(2)
; CHECK: MemoryUse(3)
; CHECK-NEXT: %3 = load i32
  %3 = load i32, i32* %a32, align 4, !invariant.group !0
  %add = add nsw i32 %2, %3
  ret i32 %add
}
100
+
28
101
declare i8* @llvm.invariant.group.barrier(i8*)

!0 = !{!"group1"}