Index: lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- lib/Target/AMDGPU/VOP3Instructions.td
+++ lib/Target/AMDGPU/VOP3Instructions.td
@@ -468,6 +468,22 @@
 } // End Predicates = [Has16BitInsts]

+class ThreeOpFrag<SDPatternOperator op1, SDPatternOperator op2> : PatFrag<
+  (ops node:$x, node:$y, node:$z),
+  // When the inner operation is used multiple times, selecting 3-op
+  // instructions may still be beneficial -- if the other users can be
+  // combined similarly. Let's be conservative for now.
+  (op2 (HasOneUseBinOp<op1> node:$x, node:$y), node:$z),
+  [{
+    SDValue Src0 = Operands[0];
+    SDValue Src1 = Operands[1];
+    SDValue Src2 = Operands[2];
+    return (Src0->isDivergent() + Src1->isDivergent() + Src2->isDivergent()) >= 2;
+  }]
+> {
+  let PredicateCodeUsesOperands = 1;
+}
+
 let SubtargetPredicate = isGFX9 in {
 def V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16_F16>>;
 def V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
@@ -502,6 +518,20 @@
 def V_ADD_I32_gfx9 : VOP3Inst <"v_add_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32>>;
 def V_SUB_I32_gfx9 : VOP3Inst <"v_sub_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32>>;
+
+
+class ThreeOp_i32_Pats<SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat <
+  (ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2), //(op2 (op1 i32:$src0, i32:$src1), i32:$src2),
+  (inst i32:$src0, i32:$src1, i32:$src2)
+>;
+
+def : ThreeOp_i32_Pats<shl, add, V_LSHL_ADD_U32>;
+def : ThreeOp_i32_Pats<add, add, V_ADD3_U32>;
+def : ThreeOp_i32_Pats<shl, or, V_LSHL_OR_B32>;
+def : ThreeOp_i32_Pats<add, shl, V_ADD_LSHL_U32>;
+def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32>;
+def : ThreeOp_i32_Pats<or, or, V_OR3_B32>;
+
 } // End SubtargetPredicate = isGFX9

 //===----------------------------------------------------------------------===//
Index: test/CodeGen/AMDGPU/add3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/add3.ll
@@ -0,0 +1,87 @@
+;RUN: llc < %s -march=amdgcn -mcpu=gfx900 | FileCheck -check-prefix=GCN %s
+
+; ===================================================================================
+; V_ADD3_U32
+; ===================================================================================
+
+; GCN-LABEL: {{^}}add3:
+; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
+; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
+define amdgpu_kernel void @add3(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  store i32 %result, i32 addrspace(1)* %out
+  ret void
+}
+
+; ThreeOp instruction variant not used due to Constant Bus Limitations
+; TODO: with reassociation it is possible to replace a v_add_u32_e32 with a s_add_i32
+; GCN-LABEL: {{^}}add3_vgpr_b:
+; GCN: v_add_u32_e32 v0, s0, v0
+; GCN: v_add_u32_e32 v0, s1, v0
define amdgpu_ps float @add3_vgpr_b(i32 inreg %a, i32 %b, i32 inreg %c) {
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; GCN-LABEL: {{^}}add3_vgpr_all:
+; GCN: v_add3_u32 v0, v0, v1, v2
+define amdgpu_ps float @add3_vgpr_all(i32 %a, i32 %b, i32 %c) {
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; GCN-LABEL: {{^}}add3_vgpr_all2:
+; GCN: v_add3_u32 v0, v1, v2, v0
+define amdgpu_ps float @add3_vgpr_all2(i32 %a, i32 %b, i32 %c) {
+  %x = add i32 %b, %c
+  %result = add i32 %x, %a
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; GCN-LABEL: {{^}}add3_vgpr_bc:
+; GCN: v_add3_u32 v0, s0, v0, v1
+define amdgpu_ps float @add3_vgpr_bc(i32 inreg %a, i32 %b, i32 %c) {
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; GCN-LABEL: {{^}}add3_vgpr_const:
+; GCN: v_add3_u32 v0, v0, 
v1, 16 +define amdgpu_ps float @add3_vgpr_const(i32 %a, i32 %b) { + %x = add i32 %a, %b + %result = add i32 %x, 16 + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}add3_multiuse_outer: +; GCN: v_add3_u32 v0, v0, v1, v2 +define amdgpu_ps <2 x float> @add3_multiuse_outer(i32 %a, i32 %b, i32 %c, i32 %x) { + %inner = add i32 %a, %b + %outer = add i32 %inner, %c + %x1 = mul i32 %outer, %x + %r1 = insertelement <2 x i32> undef, i32 %outer, i32 0 + %r0 = insertelement <2 x i32> %r1, i32 %x1, i32 1 + %bc = bitcast <2 x i32> %r0 to <2 x float> + ret <2 x float> %bc +} + +; GCN-LABEL: {{^}}add3_multiuse_inner: +; GCN: v_add_u32_e32 v0, v0, v1 +; GCN: v_add_u32_e32 v1, v0, v2 +define amdgpu_ps <2 x float> @add3_multiuse_inner(i32 %a, i32 %b, i32 %c) { + %inner = add i32 %a, %b + %outer = add i32 %inner, %c + %r1 = insertelement <2 x i32> undef, i32 %inner, i32 0 + %r0 = insertelement <2 x i32> %r1, i32 %outer, i32 1 + %bc = bitcast <2 x i32> %r0 to <2 x float> + ret <2 x float> %bc +} Index: test/CodeGen/AMDGPU/add_shl.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/add_shl.ll @@ -0,0 +1,52 @@ +;RUN: llc < %s -march=amdgcn -mcpu=gfx900 | FileCheck -check-prefix=GCN %s + +; =================================================================================== +; V_ADD_LSHL_U32 +; =================================================================================== + +; GCN-LABEL: {{^}}add_shl: +; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +; GCN: s_lshl_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +define amdgpu_kernel void @add_shl(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) { + %x = add i32 %a, %b + %result = shl i32 %x, %c + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}add_shl_vgpr_c: +; GCN: s_add_i32 s0, s0, s1 +; GCN: v_lshlrev_b32_e64 v0, v0, s0 +define amdgpu_ps float @add_shl_vgpr_c(i32 inreg %a, i32 inreg %b, i32 %c) { + %x = add i32 %a, %b + %result = shl i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}add_shl_vgpr_all: +; GCN: v_add_lshl_u32 v0, v0, v1, v2 +define amdgpu_ps float @add_shl_vgpr_all(i32 %a, i32 %b, i32 %c) { + %x = add i32 %a, %b + %result = shl i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}add_shl_vgpr_ac: +; GCN: v_add_lshl_u32 v0, v0, s0, v1 +define amdgpu_ps float @add_shl_vgpr_ac(i32 %a, i32 inreg %b, i32 %c) { + %x = add i32 %a, %b + %result = shl i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}add_shl_vgpr_const: +; GCN: v_add_lshl_u32 v0, v0, v1, 9 +define amdgpu_ps float @add_shl_vgpr_const(i32 %a, i32 %b) { + %x = add i32 %a, %b + %result = shl i32 %x, 9 + %bc = bitcast i32 %result to float + ret float %bc +} Index: test/CodeGen/AMDGPU/and_or.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/and_or.ll @@ -0,0 +1,53 @@ +;RUN: llc < %s -march=amdgcn -mcpu=gfx900 | FileCheck -check-prefix=GCN %s + +; =================================================================================== +; V_AND_OR_B32 +; =================================================================================== + +; GCN-LABEL: {{^}}and_or: +; GCN: s_and_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +; GCN: s_or_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +define amdgpu_kernel void @and_or(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) { + %x = and i32 %a, %b + %result = or i32 %x, %c + store i32 
%result, i32 addrspace(1)* %out + ret void +} + +; ThreeOp instruction variant not used due to Constant Bus Limitations +; GCN-LABEL: {{^}}and_or_vgpr_b: +; GCN: v_and_b32_e32 v0, s0, v0 +; GCN: v_or_b32_e32 v0, s1, v0 +define amdgpu_ps float @and_or_vgpr_b(i32 inreg %a, i32 %b, i32 inreg %c) { + %x = and i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}and_or_vgpr_all: +; GCN: v_and_or_b32 v0, v0, v1, v2 +define amdgpu_ps float @and_or_vgpr_all(i32 %a, i32 %b, i32 %c) { + %x = and i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}and_or_vgpr_ab: +; GCN: v_and_or_b32 v0, v0, v1, s0 +define amdgpu_ps float @and_or_vgpr_ab(i32 %a, i32 %b, i32 inreg %c) { + %x = and i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}and_or_vgpr_const: +; GCN: v_and_or_b32 v0, v0, 4, v1 +define amdgpu_ps float @and_or_vgpr_const(i32 %a, i32 %b) { + %x = and i32 4, %a + %result = or i32 %x, %b + %bc = bitcast i32 %result to float + ret float %bc +} \ No newline at end of file Index: test/CodeGen/AMDGPU/or3.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/or3.ll @@ -0,0 +1,63 @@ +;RUN: llc < %s -march=amdgcn -mcpu=gfx900 | FileCheck -check-prefix=GCN %s + +; =================================================================================== +; V_OR3_B32 +; =================================================================================== + +; GCN-LABEL: {{^}}or3: +; GCN: s_or_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +; GCN: s_or_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +define amdgpu_kernel void @or3(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) { + %x = or i32 %a, %b + %result = or i32 %x, %c + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; ThreeOp instruction variant not used due to Constant Bus Limitations +; TODO: with reassociation it is possible to replace a v_or_b32_e32 with a s_or_b32 +; GCN-LABEL: {{^}}or3_vgpr_a: +; GCN: v_or_b32_e32 v0, s0, v0 +; GCN: v_or_b32_e32 v0, s1, v0 +define amdgpu_ps float @or3_vgpr_a(i32 %a, i32 inreg %b, i32 inreg %c) { + %x = or i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}or3_vgpr_all: +; GCN: v_or3_b32 v0, v0, v1, v2 +define amdgpu_ps float @or3_vgpr_all(i32 %a, i32 %b, i32 %c) { + %x = or i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}or3_vgpr_all2: +; GCN: v_or3_b32 v0, v1, v2, v0 +define amdgpu_ps float @or3_vgpr_all2(i32 %a, i32 %b, i32 %c) { + %x = or i32 %b, %c + %result = or i32 %x, %a + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}or3_vgpr_bc: +; GCN: v_or3_b32 v0, s0, v0, v1 +define amdgpu_ps float @or3_vgpr_bc(i32 inreg %a, i32 %b, i32 %c) { + %x = or i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}or3_vgpr_const: +; GCN: v_or3_b32 v0, v1, v0, 64 +define amdgpu_ps float @or3_vgpr_const(i32 %a, i32 %b) { + %x = or i32 64, %b + %result = or i32 %x, %a + %bc = bitcast i32 %result to float + ret float %bc +} Index: test/CodeGen/AMDGPU/shl_add.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/shl_add.ll @@ -0,0 +1,53 @@ +;RUN: llc < %s -march=amdgcn -mcpu=gfx900 | FileCheck -check-prefix=GCN %s + +; 
=================================================================================== +; V_LSHL_ADD_U32 +; =================================================================================== + +; GCN-LABEL: {{^}}shl_add: +; GCN: s_lshl_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +define amdgpu_kernel void @shl_add(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) { + %x = shl i32 %a, %b + %result = add i32 %x, %c + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; ThreeOp instruction variant not used due to Constant Bus Limitations +; GCN-LABEL: {{^}}shl_add_vgpr_a: +; GCN: v_lshlrev_b32_e32 v0, s0, v0 +; GCN: v_add_u32_e32 v0, s1, v0 +define amdgpu_ps float @shl_add_vgpr_a(i32 %a, i32 inreg %b, i32 inreg %c) { + %x = shl i32 %a, %b + %result = add i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_add_vgpr_all: +; GCN: v_lshl_add_u32 v0, v0, v1, v2 +define amdgpu_ps float @shl_add_vgpr_all(i32 %a, i32 %b, i32 %c) { + %x = shl i32 %a, %b + %result = add i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_add_vgpr_ab: +; GCN: v_lshl_add_u32 v0, v0, v1, s0 +define amdgpu_ps float @shl_add_vgpr_ab(i32 %a, i32 %b, i32 inreg %c) { + %x = shl i32 %a, %b + %result = add i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_add_vgpr_const: +; GCN: v_lshl_add_u32 v0, v0, 3, v1 +define amdgpu_ps float @shl_add_vgpr_const(i32 %a, i32 %b) { + %x = shl i32 %a, 3 + %result = add i32 %x, %b + %bc = bitcast i32 %result to float + ret float %bc +} Index: test/CodeGen/AMDGPU/shl_or.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/shl_or.ll @@ -0,0 +1,81 @@ +;RUN: llc < %s -march=amdgcn -mcpu=gfx900 | FileCheck -check-prefix=GCN %s + +; =================================================================================== +; V_LSHL_OR_B32 +; =================================================================================== + +; GCN-LABEL: {{^}}shl_or: +; GCN: s_lshl_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +; GCN: s_or_b32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}} +define amdgpu_kernel void @shl_or(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) { + %x = shl i32 %a, %b + %result = or i32 %x, %c + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}shl_or_vgpr_c: +; GCN: s_lshl_b32 s0, s0, s1 +; GCN: v_or_b32_e32 v0, s0, v0 +define amdgpu_ps float @shl_or_vgpr_c(i32 inreg %a, i32 inreg %b, i32 %c) { + %x = shl i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_or_vgpr_all: +; GCN: v_lshl_or_b32 v0, v0, v1, v2 +define amdgpu_ps float @shl_or_vgpr_all(i32 %a, i32 %b, i32 %c) { + %x = shl i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_or_vgpr_ac: +; GCN: v_lshl_or_b32 v0, v0, s0, v1 +define amdgpu_ps float @shl_or_vgpr_ac(i32 %a, i32 inreg %b, i32 %c) { + %x = shl i32 %a, %b + %result = or i32 %x, %c + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_or_vgpr_const: +; GCN: v_lshl_or_b32 v0, v0, v1, 6 +define amdgpu_ps float @shl_or_vgpr_const(i32 %a, i32 %b) { + %x = shl i32 %a, %b + %result = or i32 %x, 6 + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_or_vgpr_const2: +; GCN: v_lshl_or_b32 v0, v0, 6, v1 +define amdgpu_ps float @shl_or_vgpr_const2(i32 %a, i32 
%b) { + %x = shl i32 %a, 6 + %result = or i32 %x, %b + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_or_vgpr_const_scalar1: +; GCN: s_lshl_b32 s0, s0, 6 +; GCN: v_or_b32_e32 v0, s0, v0 +define amdgpu_ps float @shl_or_vgpr_const_scalar1(i32 inreg %a, i32 %b) { + %x = shl i32 %a, 6 + %result = or i32 %x, %b + %bc = bitcast i32 %result to float + ret float %bc +} + +; GCN-LABEL: {{^}}shl_or_vgpr_const_scalar2: +; GCN: v_lshlrev_b32_e32 v0, 6, v0 +; GCN: v_or_b32_e32 v0, s0, v0 +define amdgpu_ps float @shl_or_vgpr_const_scalar2(i32 %a, i32 inreg %b) { + %x = shl i32 %a, 6 + %result = or i32 %x, %b + %bc = bitcast i32 %result to float + ret float %bc +} Index: test/CodeGen/AMDGPU/sibling-call.ll =================================================================== --- test/CodeGen/AMDGPU/sibling-call.ll +++ test/CodeGen/AMDGPU/sibling-call.ll @@ -136,8 +136,7 @@ ; GFX9-NEXT: v_add_u32_e32 v0, v0, v1 -; GFX9: v_add_u32_e32 v0, v0, [[LOAD_0]] -; GFX9: v_add_u32_e32 v0, v0, [[LOAD_1]] +; GFX9: v_add3_u32 v0, v0, v3, v2 ; GCN-NEXT: s_setpc_b64 define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %large) #1 {