diff --git a/llvm/test/CodeGen/AMDGPU/v_cmp_gfx11.ll b/llvm/test/CodeGen/AMDGPU/v_cmp_gfx11.ll
--- a/llvm/test/CodeGen/AMDGPU/v_cmp_gfx11.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_cmp_gfx11.ll
@@ -1,26 +1,72 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mattr=-wavefrontsize32,+wavefrontsize64 --global-isel=0 -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK %s
 
-define amdgpu_kernel void @test() {
-; CHECK-LABEL: test:
+define amdgpu_kernel void @icmp_test() {
+; CHECK-LABEL: icmp_test:
 ; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: v_cmp_eq_u16_e64 s0, 0, 0
+; CHECK-NEXT: v_cmp_eq_u16_e64 s[0:1], 0, 0
 ; CHECK-NEXT: v_mov_b32_e32 v1, 0
 ; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_cmp_eq_u32 s0, 0
-; CHECK-NEXT: s_cselect_b32 s0, -1, 0
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; CHECK-NEXT: ds_store_b32 v1, v0
 ; CHECK-NEXT: s_endpgm
 entry:
-  %0 = tail call i64 @llvm.amdgcn.icmp.i64.i16(i16 0, i16 0, i32 32)
-  %cmp0 = icmp eq i64 %0, 0
-  %add0 = zext i1 %cmp0 to i32
-  store i32 %add0, ptr addrspace(3) null, align 2147483648
+  %icmp.intr = tail call i64 @llvm.amdgcn.icmp.i64.i16(i16 0, i16 0, i32 32)
+  %cmp0 = icmp eq i64 %icmp.intr, 0
+  %zext0 = zext i1 %cmp0 to i32
+  store i32 %zext0, ptr addrspace(3) null, align 2147483648
+  ret void
+}
+
+define amdgpu_kernel void @fcmp_test(half %x, half %y) {
+; CHECK-LABEL: fcmp_test:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b32 s0, s[0:1], 0x0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_lshr_b32 s1, s0, 16
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_cmp_le_f16_e64 s[0:1], s0, s1
+; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT: ds_store_b32 v1, v0
+; CHECK-NEXT: s_endpgm
+entry:
+  %fcmp.intr = tail call i64 @llvm.amdgcn.fcmp.i64.f16(half %x, half %y, i32 5)
+  %cmp0 = icmp eq i64 %fcmp.intr, 0
+  %zext0 = zext i1 %cmp0 to i32
+  store i32 %zext0, ptr addrspace(3) null, align 2147483648
+  ret void
+}
+
+define amdgpu_kernel void @ballot_test(half %x, half %y) {
+; CHECK-LABEL: ballot_test:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b32 s0, s[0:1], 0x0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_lshr_b32 s1, s0, 16
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_cmp_eq_f16_e64 s[0:1], s0, s1
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; CHECK-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-NEXT: ds_store_b64 v2, v[0:1]
+; CHECK-NEXT: s_endpgm
+  %cmp = fcmp oeq half %x, %y
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
+  store i64 %ballot, ptr addrspace(3) null, align 2147483648
   ret void
 }
 
-; Function Attrs: convergent nounwind readnone willreturn
 declare i64 @llvm.amdgcn.icmp.i64.i16(i16, i16, i32 immarg) #0
+declare i64 @llvm.amdgcn.fcmp.i64.f16(half, half, i32 immarg) #0
+
+declare i64 @llvm.amdgcn.ballot.i64(i1) #0
+
 
 attributes #0 = { convergent nounwind readnone willreturn }