Index: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
@@ -1327,11 +1327,21 @@
   (S_XOR_B64 $src0, $src1)
 >;
 
+def : GCNPat <
+  (i1 (sub i1:$src0, i1:$src1)),
+  (S_XOR_B64 $src0, $src1)
+>;
+
 let AddedComplexity = 1 in {
 def : GCNPat <
   (i1 (add i1:$src0, (i1 -1))),
   (S_NOT_B64 $src0)
 >;
+
+def : GCNPat <
+  (i1 (sub i1:$src0, (i1 -1))),
+  (S_NOT_B64 $src0)
+>;
 }
 
 def : GCNPat <
Index: llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll
@@ -19,3 +19,29 @@
   store i1 %add, i1 addrspace(1)* %out
   ret void
 }
+
+; GCN-LABEL: {{^}}add_i1_cf:
+; GCN: v_cmp_ne_u32_e32 vcc, 0, {{v[0-9]+}}
+; GCN-NEXT: s_not_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc
+define amdgpu_kernel void @add_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %0 = load volatile i1, i1 addrspace(1)* %a
+  br label %endif
+
+else:
+  %1 = load volatile i1, i1 addrspace(1)* %b
+  br label %endif
+
+endif:
+  %2 = phi i1 [%0, %if], [%1, %else]
+  %3 = add i1 %2, -1
+  store i1 %3, i1 addrspace(1)* %out
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
Index: llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll
@@ -0,0 +1,47 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+
+; GCN-LABEL: {{^}}sub_var_var_i1:
+; GCN: s_xor_b64
+define amdgpu_kernel void @sub_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
+  %a = load volatile i1, i1 addrspace(1)* %in0
+  %b = load volatile i1, i1 addrspace(1)* %in1
+  %sub = sub i1 %a, %b
+  store i1 %sub, i1 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}sub_var_imm_i1:
+; GCN: s_not_b64
+define amdgpu_kernel void @sub_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) {
+  %a = load volatile i1, i1 addrspace(1)* %in
+  %sub = sub i1 %a, 1
+  store i1 %sub, i1 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}sub_i1_cf:
+; GCN: v_cmp_ne_u32_e32 vcc, 0, {{v[0-9]+}}
+; GCN-NEXT: s_not_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc
+define amdgpu_kernel void @sub_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %0 = load volatile i1, i1 addrspace(1)* %a
+  br label %endif
+
+else:
+  %1 = load volatile i1, i1 addrspace(1)* %b
+  br label %endif
+
+endif:
+  %2 = phi i1 [%0, %if], [%1, %else]
+  %3 = sub i1 %2, -1
+  store i1 %3, i1 addrspace(1)* %out
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
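
Rationale, as a sketch outside the patch itself: i1 values are two's-complement integers modulo 2, so subtraction coincides with addition, and both coincide with XOR; likewise the immediate -1 truncates to 1 in i1, so sub i1 x, -1 is equivalent to NOT x. That is why the new sub patterns reuse S_XOR_B64 and S_NOT_B64 exactly as the existing add patterns do. A quick Python check of the identities the patterns rely on (illustrative only, not part of the commit):

# Verify the 1-bit arithmetic identities behind the GCNPats above.
for a in (0, 1):
    for b in (0, 1):
        # sub i1 a, b lowers to s_xor_b64: (a - b) mod 2 == a ^ b
        assert (a - b) % 2 == a ^ b
    # sub i1 a, -1 lowers to s_not_b64: (a + 1) mod 2 == a ^ 1 == NOT a
    assert (a - (-1)) % 2 == a ^ 1
print("i1 sub matches xor/not, as the patterns assume")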