Index: llvm/lib/Target/AMDGPU/SOPInstructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -558,19 +558,19 @@
 >;
 
 def S_NAND_B32 : SOP2_32 <"s_nand_b32",
-  [(set i32:$sdst, (not (and_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (and_oneuse i32:$src0, i32:$src1)))]
 >;
 
 def S_NAND_B64 : SOP2_64 <"s_nand_b64",
-  [(set i64:$sdst, (not (and_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (and_oneuse i64:$src0, i64:$src1)))]
 >;
 
 def S_NOR_B32 : SOP2_32 <"s_nor_b32",
-  [(set i32:$sdst, (not (or_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (or_oneuse i32:$src0, i32:$src1)))]
 >;
 
 def S_NOR_B64 : SOP2_64 <"s_nor_b64",
-  [(set i64:$sdst, (not (or_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (or_oneuse i64:$src0, i64:$src1)))]
 >;
 } // End isCommutable = 1
Index: llvm/lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -667,6 +667,14 @@
 def : VOPBinOpClampPat;
 def : VOPBinOpClampPat;
 
+def : GCNPat<(getDivergentFrag<or>.ret (or_oneuse i64:$src0, i64:$src1), i64:$src2),
+  (REG_SEQUENCE VReg_64,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)),
+                   (i32 (EXTRACT_SUBREG $src1, sub0)),
+                   (i32 (EXTRACT_SUBREG $src2, sub0))), sub0,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)),
+                   (i32 (EXTRACT_SUBREG $src1, sub1)),
+                   (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>;
 
 // FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
 class OpSelBinOpClampPat
+define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_or3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or3_b32 v0, v1, v0, v2
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %i2, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = or i32 %i5, %i4
+  %i8 = or i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %i2, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @divergent_or3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_or3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or3_b32 v1, v3, v1, v5
+; GCN-NEXT:    v_or3_b32 v0, v2, v0, v4
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    v_not_b32_e32 v1, v1
+; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %i2, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = or i64 %i5, %i4
+  %i8 = or i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %i2, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @divergent_and3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_and3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %i2, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = and i32 %i5, %i4
+  %i8 = and i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %i2, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @divergent_and3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_and3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v1, v3, v1
+; GCN-NEXT:    v_and_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    v_not_b32_e32 v1, v1
+; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %i2, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = and i64 %i5, %i4
+  %i8 = and i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %i2, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @divergent_xor3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_xor3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_xnor_b32_e32 v0, v0, v2
+; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %i2, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = xor i32 %i5, %i4
+  %i8 = xor i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %i2, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @divergent_xor3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_xor3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v1, v3, v1
+; GCN-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_xnor_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_xnor_b32_e32 v1, v1, v5
+; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %i2, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = xor i64 %i5, %i4
+  %i8 = xor i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %i2, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @uniform_or3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_or3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_or_b32 s0, s1, s0
+; GCN-NEXT:    s_nor_b32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    global_store_dword v0, v1, s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = or i32 %i5, %i4
+  %i8 = or i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @uniform_or3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_or3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_nor_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %arg, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = or i64 %i5, %i4
+  %i8 = or i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @uniform_and3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_and3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s0, s1, s0
+; GCN-NEXT:    s_nand_b32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    global_store_dword v0, v1, s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = and i32 %i5, %i4
+  %i8 = and i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @uniform_and3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_and3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_nand_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %arg, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = and i64 %i5, %i4
+  %i8 = and i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @uniform_xor3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_xor3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_xor_b32 s0, s1, s0
+; GCN-NEXT:    s_xnor_b32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    global_store_dword v0, v1, s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = xor i32 %i5, %i4
+  %i8 = xor i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @uniform_xor3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_xor3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_xnor_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %arg, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = xor i64 %i5, %i4
+  %i8 = xor i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()