Index: test/CodeGen/AMDGPU/global_atomics.ll
===================================================================
--- test/CodeGen/AMDGPU/global_atomics.ll
+++ test/CodeGen/AMDGPU/global_atomics.ll
@@ -1,13 +1,35 @@
-; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI --check-prefix=FUNC %s
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=VI --check-prefix=FUNC %s
-
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
 
 ; FUNC-LABEL: {{^}}atomic_add_i32_offset:
 ; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_soffset:
+; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x8ca0
+; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
+define void @atomic_add_i32_soffset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 9000
+  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_huge_offset:
+; SI-DAG: v_mov_b32_e32 v[[PTRLO:[0-9]+]], 0xdeac
+; SI-DAG: v_mov_b32_e32 v[[PTRHI:[0-9]+]], 0xabcd
+; SI: buffer_atomic_add v{{[0-9]+}}, v{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+; VI: flat_atomic_add
+define void @atomic_add_i32_huge_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 47224239175595
+
+  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -16,21 +38,20 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
 ; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
 ; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
 ; VI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
-
 define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -41,9 +62,9 @@
 define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -51,7 +72,7 @@
 ; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -60,8 +81,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -71,7 +92,7 @@
 define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -82,8 +103,8 @@
 define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -91,8 +112,8 @@
 ; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -101,9 +122,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -113,8 +134,8 @@
 define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -125,9 +146,9 @@
 define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -135,7 +156,7 @@
 ; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -144,8 +165,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -155,7 +176,7 @@
 define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -166,8 +187,8 @@
 define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -175,8 +196,8 @@
 ; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -185,9 +206,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -197,8 +218,8 @@
 define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -209,9 +230,9 @@
 define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -219,7 +240,7 @@
 ; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -228,8 +249,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -239,7 +260,7 @@
 define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -250,8 +271,8 @@
 define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -259,8 +280,8 @@
 ; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -269,9 +290,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -281,8 +302,8 @@
 define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -293,9 +314,9 @@
 define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -303,7 +324,7 @@
 ; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -312,8 +333,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -323,7 +344,7 @@
 define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -334,8 +355,8 @@
 define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -343,8 +364,8 @@
 ; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -353,9 +374,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -365,8 +386,8 @@
 define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -377,9 +398,9 @@
 define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -387,7 +408,7 @@
 ; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -396,8 +417,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -407,7 +428,7 @@
 define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -418,8 +439,8 @@
 define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -427,8 +448,8 @@
 ; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -437,9 +458,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -449,8 +470,8 @@
 define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -461,9 +482,9 @@
 define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -471,7 +492,7 @@
 ; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -480,8 +501,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -491,7 +512,7 @@
 define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -502,8 +523,8 @@
 define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -511,8 +532,8 @@
 ; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -521,9 +542,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -533,8 +554,8 @@
 define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -545,9 +566,9 @@
 define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -555,7 +576,7 @@
 ; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -564,8 +585,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -575,7 +596,7 @@
 define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -586,8 +607,8 @@
 define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -595,8 +616,8 @@
 ; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -605,9 +626,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -617,8 +638,8 @@
 define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -629,9 +650,9 @@
 define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -639,7 +660,7 @@
 ; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -648,8 +669,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -659,7 +680,7 @@
 define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -670,8 +691,8 @@
 define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -679,8 +700,8 @@
 ; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -689,32 +710,35 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
 ; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
 ; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
+
+; VI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
 define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
 ; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
 ; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
+
 ; VI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -722,7 +746,7 @@
 ; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -731,8 +755,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -742,7 +766,7 @@
 define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -753,19 +777,17 @@
 define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
-; CMP_SWAP
-
 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_offset:
 ; GCN: buffer_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_cmpxchg_i32_offset(i32 addrspace(1)* %out, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
@@ -774,20 +796,22 @@
 ; GCN: buffer_store_dword v[[RET]]
 define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
-  %1 = extractvalue { i32, i1 } %0, 0
-  store i32 %1, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %extract0 = extractvalue { i32, i1 } %val, 0
+  store i32 %extract0, i32 addrspace(1)* %out2
   ret void
 }
 
 ; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
 ; SI: buffer_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
+
+; VI: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
 define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
@@ -798,10 +822,10 @@
 define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
-  %1 = extractvalue { i32, i1 } %0, 0
-  store i32 %1, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+  %extract0 = extractvalue { i32, i1 } %val, 0
+  store i32 %extract0, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -809,7 +833,7 @@
 ; GCN: buffer_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_cmpxchg_i32(i32 addrspace(1)* %out, i32 %in, i32 %old) {
 entry:
-  %0 = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
+  %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
@@ -818,9 +842,9 @@
 ; GCN: buffer_store_dword v[[RET]]
 define void @atomic_cmpxchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
 entry:
-  %0 = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
-  %1 = extractvalue { i32, i1 } %0, 0
-  store i32 %1, i32 addrspace(1)* %out2
+  %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
+  %extract0 = extractvalue { i32, i1 } %val, 0
+  store i32 %extract0, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -830,7 +854,7 @@
 define void @atomic_cmpxchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+  %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
   ret void
 }
 
@@ -841,9 +865,9 @@
 define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
-  %1 = extractvalue { i32, i1 } %0, 0
-  store i32 %1, i32 addrspace(1)* %out2
+  %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+  %extract0 = extractvalue { i32, i1 } %val, 0
+  store i32 %extract0, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -851,8 +875,8 @@
 ; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
 define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -861,9 +885,9 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -873,8 +897,8 @@
 define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
   ret void
 }
 
@@ -885,9 +909,9 @@
 define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -895,7 +919,7 @@
 ; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+  %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
   ret void
 }
 
@@ -904,8 +928,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
 entry:
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
@@ -915,7 +939,7 @@
 define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
   ret void
 }
 
@@ -926,21 +950,20 @@
 define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
-  store i32 %0, i32 addrspace(1)* %out2
+  %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %val, i32 addrspace(1)* %out2
   ret void
 }
 
-; ATOMIC_LOAD
 ; FUNC-LABEL: {{^}}atomic_load_i32_offset:
 ; SI: buffer_load_dword [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
 ; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_load_i32_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 4
-  %0 = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
-  store i32 %0, i32 addrspace(1)* %out
+  %gep = getelementptr i32, i32 addrspace(1)* %in, i64 4
+  %val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
+  store i32 %val, i32 addrspace(1)* %out
   ret void
 }
 
@@ -950,8 +973,8 @@
 ; GCN: buffer_store_dword [[RET]]
 define void @atomic_load_i32(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
 entry:
-  %0 = load atomic i32, i32 addrspace(1)* %in seq_cst, align 4
-  store i32 %0, i32 addrspace(1)* %out
+  %val = load atomic i32, i32 addrspace(1)* %in seq_cst, align 4
+  store i32 %val, i32 addrspace(1)* %out
   ret void
 }
 
@@ -962,9 +985,9 @@
 define void @atomic_load_i32_addr64_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
-  %0 = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
-  store i32 %0, i32 addrspace(1)* %out
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+  %val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
+  store i32 %val, i32 addrspace(1)* %out
   ret void
 }
 
@@ -975,66 +998,17 @@
 define void @atomic_load_i32_addr64(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
-  %0 = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 4
-  store i32 %0, i32 addrspace(1)* %out
-  ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_load_i64_offset:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
-entry:
-  %gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
-  %0 = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
-  store i64 %0, i64 addrspace(1)* %out
-  ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_load_i64:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
-entry:
-  %0 = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
-  store i64 %0, i64 addrspace(1)* %out
+  %val = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 4
+  store i32 %val, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}atomic_load_i64_addr64_offset:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
-entry:
-  %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
-  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %0 = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
-  store i64 %0, i64 addrspace(1)* %out
-  ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_load_i64_addr64:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
-entry:
-  %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
-  %0 = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
-  store i64 %0, i64 addrspace(1)* %out
-  ret void
-}
-
-; ATOMIC_STORE
 ; FUNC-LABEL: {{^}}atomic_store_i32_offset:
 ; SI: buffer_store_dword {{v[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
 ; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
 define void @atomic_store_i32_offset(i32 %in, i32 addrspace(1)* %out) {
 entry:
-  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
   store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
   ret void
 }
 
@@ -1054,7 +1028,7 @@
 define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(1)* %out, i64 %index) {
 entry:
   %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
-  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
+  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
   store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
addrspace(1)* %gep seq_cst, align 4 ret void } @@ -1068,43 +1042,3 @@ store atomic i32 %in, i32 addrspace(1)* %ptr seq_cst, align 4 ret void } - -; FUNC-LABEL: {{^}}atomic_store_i64_offset: -; SI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}} -; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}} -define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) { -entry: - %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8 - ret void -} - -; FUNC-LABEL: {{^}}atomic_store_i64: -; SI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc -; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc -define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) { -entry: - store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8 - ret void -} - -; FUNC-LABEL: {{^}}atomic_store_i64_addr64_offset: -; SI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}} -; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}} -define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) { -entry: - %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8 - ret void -} - -; FUNC-LABEL: {{^}}atomic_store_i64_addr64: -; SI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}} -; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}} -define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) { -entry: - %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8 - ret void -} Index: test/CodeGen/AMDGPU/global_atomics_i64.ll =================================================================== --- test/CodeGen/AMDGPU/global_atomics_i64.ll +++ test/CodeGen/AMDGPU/global_atomics_i64.ll @@ -1,13 +1,12 @@ ; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s - ; GCN-LABEL: {{^}}atomic_add_i64_offset: ; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}} define void @atomic_add_i64_offset(i64 addrspace(1)* %out, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -17,7 +16,7 @@ define void @atomic_add_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -29,7 +28,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in 
seq_cst ret void } @@ -41,7 +40,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -50,7 +49,7 @@ ; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}} define void @atomic_add_i64(i64 addrspace(1)* %out, i64 %in) { entry: - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst ret void } @@ -59,7 +58,7 @@ ; GCN: buffer_store_dwordx2 [[RET]] define void @atomic_add_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -70,7 +69,7 @@ define void @atomic_add_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst ret void } @@ -81,7 +80,7 @@ define void @atomic_add_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -91,7 +90,7 @@ define void @atomic_and_i64_offset(i64 addrspace(1)* %out, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -101,7 +100,7 @@ define void @atomic_and_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -113,7 +112,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -125,7 +124,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -134,7 +133,7 @@ ; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}} define void @atomic_and_i64(i64 addrspace(1)* %out, i64 %in) { entry: - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst ret void } @@ -143,7 +142,7 @@ ; GCN: buffer_store_dwordx2 [[RET]] define void @atomic_and_i64_ret(i64 addrspace(1)* %out, i64 
addrspace(1)* %out2, i64 %in) { entry: - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -154,7 +153,7 @@ define void @atomic_and_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst ret void } @@ -165,7 +164,7 @@ define void @atomic_and_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -175,7 +174,7 @@ define void @atomic_sub_i64_offset(i64 addrspace(1)* %out, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -185,7 +184,7 @@ define void @atomic_sub_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -197,7 +196,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -209,7 +208,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -218,7 +217,7 @@ ; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}} define void @atomic_sub_i64(i64 addrspace(1)* %out, i64 %in) { entry: - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst ret void } @@ -227,7 +226,7 @@ ; GCN: buffer_store_dwordx2 [[RET]] define void @atomic_sub_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -238,7 +237,7 @@ define void @atomic_sub_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst ret void } @@ -249,7 +248,7 @@ define void @atomic_sub_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile sub i64 
addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -259,7 +258,7 @@ define void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -269,7 +268,7 @@ define void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -281,7 +280,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -293,7 +292,7 @@ entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -302,7 +301,7 @@ ; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}} define void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) { entry: - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst ret void } @@ -311,7 +310,7 @@ ; GCN: buffer_store_dwordx2 [[RET]] define void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -322,7 +321,7 @@ define void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst ret void } @@ -333,7 +332,7 @@ define void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -343,7 +342,7 @@ define void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst ret void } @@ -353,7 +352,7 @@ define void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst 
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -365,7 +364,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -377,7 +376,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -386,7 +385,7 @@
 ; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -395,7 +394,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -406,7 +405,7 @@
 define void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -417,7 +416,7 @@
 define void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -427,7 +426,7 @@
 define void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -437,7 +436,7 @@
 define void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -449,7 +448,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -461,7 +460,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -470,7 +469,7 @@
 ; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -479,7 +478,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -490,7 +489,7 @@
 define void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -501,7 +500,7 @@
 define void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -511,7 +510,7 @@
 define void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -521,7 +520,7 @@
 define void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -533,7 +532,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -545,7 +544,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -554,7 +553,7 @@
 ; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -563,7 +562,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -574,7 +573,7 @@
 define void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -585,7 +584,7 @@
 define void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -595,7 +594,7 @@
 define void @atomic_or_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -605,7 +604,7 @@
 define void @atomic_or_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -617,7 +616,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -629,7 +628,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -638,7 +637,7 @@
 ; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_or_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -647,7 +646,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_or_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -658,7 +657,7 @@
 define void @atomic_or_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -669,7 +668,7 @@
 define void @atomic_or_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -679,7 +678,7 @@
 define void @atomic_xchg_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -689,18 +688,19 @@
 define void @atomic_xchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
 
 ; GCN-LABEL: {{^}}atomic_xchg_i64_addr64_offset:
 ; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
+; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
 define void @atomic_xchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -712,7 +712,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -721,7 +721,7 @@
 ; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_xchg_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -730,7 +730,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_xchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -741,7 +741,7 @@
 define void @atomic_xchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -752,7 +752,7 @@
 define void @atomic_xchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -762,7 +762,7 @@
 define void @atomic_xor_i64_offset(i64 addrspace(1)* %out, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -772,7 +772,7 @@
 define void @atomic_xor_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
   %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -784,7 +784,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   ret void
 }
 
@@ -796,7 +796,7 @@
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
   %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -805,7 +805,7 @@
 ; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 define void @atomic_xor_i64(i64 addrspace(1)* %out, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
   ret void
 }
 
@@ -814,7 +814,7 @@
 ; GCN: buffer_store_dwordx2 [[RET]]
 define void @atomic_xor_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
 entry:
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
@@ -825,7 +825,7 @@
 define void @atomic_xor_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
   ret void
 }
 
@@ -836,7 +836,191 @@
 define void @atomic_xor_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
 entry:
   %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
-  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
   store i64 %tmp0, i64 addrspace(1)* %out2
   ret void
 }
+
+; FUNC-LABEL: {{^}}atomic_load_i64_offset:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
+  %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_load_i64:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+entry:
+  %val = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_load_i64_addr64_offset:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_load_i64_addr64:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
+  %val = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
+  store i64 %val, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64_offset:
+; CI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+  store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
+define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
+entry:
+  store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64_addr64_offset:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64_addr64:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8
+  ret void
+}
+
+
+
+
+
+
+
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_offset:
+; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
+define void @atomic_cmpxchg_i64_offset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_offset:
+; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+entry:
+  %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64_offset:
+; SI: buffer_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
+
+; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
+define void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64_offset:
+; SI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+  %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64:
+; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
+define void @atomic_cmpxchg_i64(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+entry:
+  %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret:
+; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+entry:
+  %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64:
+; SI: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
+define void @atomic_cmpxchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64:
+; SI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+entry:
+  %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+  %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
+  %extract0 = extractvalue { i64, i1 } %val, 0
+  store i64 %extract0, i64 addrspace(1)* %out2
+  ret void
+}