diff --git a/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll b/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll
--- a/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds-sub-offset.ll
@@ -1,5 +1,6 @@
 ; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+unsafe-ds-offset-folding -verify-machineinstrs < %s | FileCheck -check-prefixes=CLAMPBIT %s
 
 declare i32 @llvm.amdgcn.workitem.id.x() #0
 
@@ -21,6 +22,21 @@
   ret void
 }
 
+; CLAMPBIT-LABEL: {{^}}write_ds_sub0_offset0_global_clamp_bit:
+; CLAMPBIT: v_sub_u32
+; CLAMPBIT: s_endpgm
+define amdgpu_kernel void @write_ds_sub0_offset0_global_clamp_bit(float %dummy.val) #1 {
+entry:
+  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
+  %sub1 = sub i32 0, %x.i
+  %tmp0 = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds.obj, i32 0, i32 %sub1
+  %arrayidx = getelementptr inbounds i32, i32 addrspace(3)* %tmp0, i32 3
+  store i32 123, i32 addrspace(3)* %arrayidx
+  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %dummy.val, float %dummy.val, float %dummy.val, i1 false)
+  store volatile float %fmas, float addrspace(1)* null
+  ret void
+}
+
 ; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_max_offset:
 ; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
 ; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
@@ -112,6 +128,21 @@
   ret void
 }
 
+; CLAMPBIT-LABEL: {{^}}add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit:
+; CLAMPBIT: v_sub_u32
+; CLAMPBIT: s_endpgm
+define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit(float %dummy.val) #1 {
+  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
+  %neg = sub i32 0, %x.i
+  %shl = shl i32 %neg, 2
+  %add = add i32 1019, %shl
+  %ptr = inttoptr i32 %add to i64 addrspace(3)*
+  store i64 123, i64 addrspace(3)* %ptr, align 4
+  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %dummy.val, float %dummy.val, float %dummy.val, i1 false)
+  store volatile float %fmas, float addrspace(1)* null
+  ret void
+}
+
 ; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
 ; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
 ; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x3fc, [[SCALED]]
@@ -127,6 +158,8 @@
   ret void
 }
 
+declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)
+
 attributes #0 = { nounwind readnone }
 attributes #1 = { nounwind }
 attributes #2 = { nounwind convergent }
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -192,6 +192,33 @@
   ret void
 }
 
+; FUNC-LABEL: {{^}}v_uaddo_clamp_bit:
+; GCN: v_add_{{i|u|co_u}}32_e64
+; GCN: s_endpgm
+define amdgpu_kernel void @v_uaddo_clamp_bit(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i32, i32 addrspace(1)* %a.gep
+  %b = load i32, i32 addrspace(1)* %b.gep
+  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %uadd, 0
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %c2 = icmp eq i1 %carry, false
+  %cc = icmp eq i32 %a, %b
+  br i1 %cc, label %exit, label %if
+
+if:
+  br label %exit
+
+exit:
+  %cout = phi i1 [false, %entry], [%c2, %if]
+  store i32 %val, i32 addrspace(1)* %out, align 4
+  store i1 %cout, i1 addrspace(1)* %carryout
+  ret void
+}
 
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 declare { i16, i1 } @llvm.uadd.with.overflow.i16(i16, i16) #1
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -176,6 +176,57 @@
   ret void
 }
 
+; FUNC-LABEL: {{^}}s_usubo_clamp_bit:
+; GCN: v_sub_{{i|u|co_u}}32_e32
+; GCN: s_endpgm
+define amdgpu_kernel void @s_usubo_clamp_bit(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
+entry:
+  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %usub, 0
+  %carry = extractvalue { i32, i1 } %usub, 1
+  %c2 = icmp eq i1 %carry, false
+  %cc = icmp eq i32 %a, %b
+  br i1 %cc, label %exit, label %if
+
+if:
+  br label %exit
+
+exit:
+  %cout = phi i1 [false, %entry], [%c2, %if]
+  store i32 %val, i32 addrspace(1)* %out, align 4
+  store i1 %cout, i1 addrspace(1)* %carryout
+  ret void
+}
+
+
+; FUNC-LABEL: {{^}}v_usubo_clamp_bit:
+; GCN: v_sub_{{i|u|co_u}}32_e64
+; GCN: s_endpgm
+define amdgpu_kernel void @v_usubo_clamp_bit(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i32, i32 addrspace(1)* %a.gep, align 4
+  %b = load i32, i32 addrspace(1)* %b.gep, align 4
+  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %usub, 0
+  %carry = extractvalue { i32, i1 } %usub, 1
+  %c2 = icmp eq i1 %carry, false
+  %cc = icmp eq i32 %a, %b
+  br i1 %cc, label %exit, label %if
+
+if:
+  br label %exit
+
+exit:
+  %cout = phi i1 [false, %entry], [%c2, %if]
+  store i32 %val, i32 addrspace(1)* %out, align 4
+  store i1 %cout, i1 addrspace(1)* %carryout
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 declare { i16, i1 } @llvm.usub.with.overflow.i16(i16, i16) #1
 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #1