- // RUN: %clang_cc1 -fms-extensions -triple x86_64-windows-msvc %s -emit-llvm -o - | FileCheck %s
+ // RUN: %clang_cc1 -fms-extensions -triple x86_64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=X64
+ // RUN: %clang_cc1 -fms-extensions -triple thumbv7-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM
+ // RUN: %clang_cc1 -fms-extensions -triple aarch64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM

volatile unsigned char sink = 0;
void test32(long *base, long idx) {
@@ -18,18 +20,105 @@ void test64(__int64 *base, __int64 idx) {
  sink = _interlockedbittestandset64(base, idx);
}

- // CHECK-LABEL: define dso_local void @test32(i32* %base, i32 %idx)
- // CHECK: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
- // CHECK: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
- // CHECK: call i8 asm sideeffect "btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
- // CHECK: call i8 asm sideeffect "btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
- // CHECK: call i8 asm sideeffect "lock btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
- // CHECK: call i8 asm sideeffect "lock btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
-
- // CHECK-LABEL: define dso_local void @test64(i64* %base, i64 %idx)
- // CHECK: call i8 asm sideeffect "btq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
- // CHECK: call i8 asm sideeffect "btcq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
- // CHECK: call i8 asm sideeffect "btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
- // CHECK: call i8 asm sideeffect "btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
- // CHECK: call i8 asm sideeffect "lock btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
- // CHECK: call i8 asm sideeffect "lock btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+ // X64-LABEL: define dso_local void @test32(i32* %base, i32 %idx)
+ // X64: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
+ // X64: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
+ // X64: call i8 asm sideeffect "btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
+ // X64: call i8 asm sideeffect "btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
+ // X64: call i8 asm sideeffect "lock btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
+ // X64: call i8 asm sideeffect "lock btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
+
+ // X64-LABEL: define dso_local void @test64(i64* %base, i64 %idx)
+ // X64: call i8 asm sideeffect "btq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+ // X64: call i8 asm sideeffect "btcq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+ // X64: call i8 asm sideeffect "btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+ // X64: call i8 asm sideeffect "btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+ // X64: call i8 asm sideeffect "lock btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+ // X64: call i8 asm sideeffect "lock btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
+
+ // ARM-LABEL: define dso_local {{.*}}void @test32(i32* %base, i32 %idx)
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
+ // ARM: %[[NEWBYTE:[^ ]*]] = xor i8 %[[BYTE]], %[[MASK]]
+ // ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
+ // ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
+ // ARM: %[[NEWBYTE:[^ ]*]] = and i8 %[[BYTE]], %[[NOTMASK]]
+ // ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
+ // ARM: %[[NEWBYTE:[^ ]*]] = or i8 %[[BYTE]], %[[MASK]]
+ // ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
+ // ARM: %[[BYTE:[^ ]*]] = atomicrmw and i8* %[[BYTEADDR]], i8 %[[NOTMASK]] seq_cst
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
+ // ARM: %[[BYTE:[^ ]*]] = atomicrmw or i8* %[[BYTEADDR]], i8 %[[MASK]] seq_cst
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ARM-LABEL: define dso_local {{.*}}void @test64(i64* %base, i64 %idx)
+ // ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
+ // ARM: %[[BASE:[^ ]*]] = bitcast i64* %{{.*}} to i8*
+ // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i64 %[[IDXHI]]
+ // ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
+ // ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
+ // ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
+ // ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
+ // ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
+ // ARM: store volatile i8 %[[RES]], i8* @sink, align 1
+
+ // ... the rest is the same, but with i64 instead of i32.
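
Aside for readers of the ARM checks above: they all encode the same byte-wise addressing, byte offset = idx >> 3 and bit position = idx & 7, with the result taken from the prior value of the selected bit. Below is a minimal standalone C model of that lowering; it is not part of the patch, the function names are illustrative, and nonnegative indices are assumed.

#include <assert.h>
#include <stdint.h>

/* Models the plain bit test: load the addressed byte and return the
   selected bit. Mirrors the ashr/bitcast/gep/trunc/and/load/lshr/and
   sequence matched by the ARM checks. */
static unsigned char model_bittest(int32_t *base, int32_t idx) {
  unsigned char *bytes = (unsigned char *)base;  /* bitcast i32* to i8* */
  unsigned char byte = bytes[idx >> 3];          /* ashr, gep, load */
  return (byte >> (idx & 7)) & 1;                /* lshr, and i8 ..., 1 */
}

/* Models the non-interlocked test-and-set: plain load, or, store, then
   return the old bit. The interlocked forms replace the load/store pair
   with a seq_cst atomicrmw, as the atomicrmw checks above show. */
static unsigned char model_bittestandset(int32_t *base, int32_t idx) {
  unsigned char *bytes = (unsigned char *)base;
  unsigned char mask = (unsigned char)(1u << (idx & 7)); /* shl i8 1, bit */
  unsigned char old = bytes[idx >> 3];
  bytes[idx >> 3] = old | mask;                  /* non-atomic update */
  return (old >> (idx & 7)) & 1;                 /* prior value of the bit */
}

int main(void) {
  int32_t bits[2] = {0, 0};
  assert(model_bittestandset(bits, 35) == 0); /* bit 35 was clear */
  assert(model_bittest(bits, 35) == 1);       /* and is now set */
  return 0;
}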