Index: lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- lib/Target/AMDGPU/VOP3Instructions.td
+++ lib/Target/AMDGPU/VOP3Instructions.td
@@ -468,6 +468,37 @@
 } // End Predicates = [Has16BitInsts]
 
+class ThreeOpFrag<SDPatternOperator op1, SDPatternOperator op2> : PatFrag<
+  (ops node:$x, node:$y, node:$z),
+  // When the inner operation is used multiple times, selecting 3-op
+  // instructions may still be beneficial -- if the other users can be
+  // combined similarly. Let's be conservative for now.
+  (op2 (HasOneUseBinOp<op1> node:$x, node:$y), node:$z),
+  [{
+    // Only use VALU ops when the result is divergent.
+    if (!N->isDivergent())
+      return false;
+
+    // Check constant bus limitations.
+    //
+    // Note: Use !isDivergent as a conservative proxy for whether the value
+    //       is in an SGPR (uniform values can end up in VGPRs as well).
+    unsigned ConstantBusUses = 0;
+    for (unsigned i = 0; i < 3; ++i) {
+      if (!Operands[i]->isDivergent() &&
+          !isInlineImmediate(Operands[i].getNode())) {
+        ConstantBusUses++;
+        if (ConstantBusUses >= 2)
+          return false;
+      }
+    }
+
+    return true;
+  }]
+> {
+  let PredicateCodeUsesOperands = 1;
+}
+
 let SubtargetPredicate = isGFX9 in {
 def V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16>>;
 def V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
@@ -502,6 +533,21 @@
 def V_ADD_I32_gfx9 : VOP3Inst <"v_add_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32>>;
 def V_SUB_I32_gfx9 : VOP3Inst <"v_sub_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32>>;
+
+
+class ThreeOp_i32_Pats<SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat <
+  // This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions.
+  (ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2),
+  (inst i32:$src0, i32:$src1, i32:$src2)
+>;
+
+def : ThreeOp_i32_Pats<shl, add, V_LSHL_ADD_U32>;
+def : ThreeOp_i32_Pats<add, shl, V_ADD_LSHL_U32>;
+def : ThreeOp_i32_Pats<add, add, V_ADD3_U32>;
+def : ThreeOp_i32_Pats<shl, or, V_LSHL_OR_B32>;
+def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32>;
+def : ThreeOp_i32_Pats<or, or, V_OR3_B32>;
+
 } // End SubtargetPredicate = isGFX9
 
 //===----------------------------------------------------------------------===//
Index: test/CodeGen/AMDGPU/add3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/add3.ll
@@ -0,0 +1,171 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+
+; ===================================================================================
+; V_ADD3_U32
+; ===================================================================================
+
+define amdgpu_ps float @add3(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: add3:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add3_u32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
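+
+; The tests below exercise the constant bus rule from ThreeOpFrag: a GFX9
+; VOP3 instruction may read at most one SGPR operand and cannot encode a
+; literal constant at all, so combining is rejected whenever two or more
+; operands are uniform and not inline immediates. Illustrative encoding, not
+; llc output: v_add3_u32 v0, s0, v0, s1 would need two constant bus reads.
+; (On VI, v_add_u32_e32 carries out through vcc; GFX9 adds the carry-less
+; v_add_u32_e32 seen in the checks.)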
+
+; ThreeOp instruction variant not used due to Constant Bus Limitations
+; TODO: with reassociation it is possible to replace a v_add_u32_e32 with a s_add_i32
+define amdgpu_ps float @add3_vgpr_b(i32 inreg %a, i32 %b, i32 inreg %c) {
+; VI-LABEL: add3_vgpr_b:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_vgpr_b:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add_u32_e32 v0, s0, v0
+; GFX9-NEXT: v_add_u32_e32 v0, s1, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @add3_vgpr_all2(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: add3_vgpr_all2:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v1, vcc, v2, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_vgpr_all2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add3_u32 v0, v1, v2, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %b, %c
+  %result = add i32 %a, %x
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @add3_vgpr_bc(i32 inreg %a, i32 %b, i32 %c) {
+; VI-LABEL: add3_vgpr_bc:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_vgpr_bc:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add3_u32 v0, s0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @add3_vgpr_const(i32 %a, i32 %b) {
+; VI-LABEL: add3_vgpr_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_vgpr_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add3_u32 v0, v0, v1, 16
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = add i32 %x, 16
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps <2 x float> @add3_multiuse_outer(i32 %a, i32 %b, i32 %c, i32 %x) {
+; VI-LABEL: add3_multiuse_outer:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_mul_lo_i32 v1, v0, v3
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_multiuse_outer:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add3_u32 v0, v0, v1, v2
+; GFX9-NEXT: v_mul_lo_i32 v1, v0, v3
+; GFX9-NEXT: ; return to shader part epilog
+  %inner = add i32 %a, %b
+  %outer = add i32 %inner, %c
+  %x1 = mul i32 %outer, %x
+  %r1 = insertelement <2 x i32> undef, i32 %outer, i32 0
+  %r0 = insertelement <2 x i32> %r1, i32 %x1, i32 1
+  %bc = bitcast <2 x i32> %r0 to <2 x float>
+  ret <2 x float> %bc
+}
+
+define amdgpu_ps <2 x float> @add3_multiuse_inner(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: add3_multiuse_inner:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_multiuse_inner:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_u32_e32 v1, v0, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %inner = add i32 %a, %b
+  %outer = add i32 %inner, %c
+  %r1 = insertelement <2 x i32> undef, i32 %inner, i32 0
+  %r0 = insertelement <2 x i32> %r1, i32 %outer, i32 1
+  %bc = bitcast <2 x i32> %r0 to <2 x float>
+  ret <2 x float> %bc
+}
+
+; A case where uniform values end up in VGPRs -- we could use v_add3_u32 here,
+; but we don't.
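+; (The fadd results are uniform according to divergence analysis, but they
+; are computed by VALU instructions into VGPRs; ThreeOpFrag's !isDivergent
+; check is only a proxy for "in an SGPR", so all three operands are counted
+; against the constant bus and the combine is conservatively rejected.)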
+define amdgpu_ps float @add3_uniform_vgpr(float inreg %a, float inreg %b, float inreg %c) {
+; VI-LABEL: add3_uniform_vgpr:
+; VI: ; %bb.0:
+; VI-NEXT: v_mov_b32_e32 v2, 0x40400000
+; VI-NEXT: v_add_f32_e64 v0, s0, 1.0
+; VI-NEXT: v_add_f32_e64 v1, s1, 2.0
+; VI-NEXT: v_add_f32_e32 v2, s2, v2
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add3_uniform_vgpr:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x40400000
+; GFX9-NEXT: v_add_f32_e64 v0, s0, 1.0
+; GFX9-NEXT: v_add_f32_e64 v1, s1, 2.0
+; GFX9-NEXT: v_add_f32_e32 v2, s2, v2
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %a1 = fadd float %a, 1.0
+  %b2 = fadd float %b, 2.0
+  %c3 = fadd float %c, 3.0
+  %bc.a = bitcast float %a1 to i32
+  %bc.b = bitcast float %b2 to i32
+  %bc.c = bitcast float %c3 to i32
+  %x = add i32 %bc.a, %bc.b
+  %result = add i32 %x, %bc.c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
Index: test/CodeGen/AMDGPU/add_shl.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/add_shl.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+
+; ===================================================================================
+; V_ADD_LSHL_U32
+; ===================================================================================
+
+define amdgpu_ps float @add_shl(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: add_shl:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add_shl:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add_lshl_u32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = shl i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @add_shl_vgpr_c(i32 inreg %a, i32 inreg %b, i32 %c) {
+; VI-LABEL: add_shl_vgpr_c:
+; VI: ; %bb.0:
+; VI-NEXT: s_add_i32 s0, s0, s1
+; VI-NEXT: v_lshlrev_b32_e64 v0, v0, s0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add_shl_vgpr_c:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = shl i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @add_shl_vgpr_ac(i32 %a, i32 inreg %b, i32 %c) {
+; VI-LABEL: add_shl_vgpr_ac:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add_shl_vgpr_ac:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add_lshl_u32 v0, v0, s0, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = shl i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
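+
+; Operand order, as the tests above and below show: v_add_lshl_u32 computes
+; (src0 + src1) << src2, while v_lshl_add_u32 computes (src0 << src1) + src2.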
+
+define amdgpu_ps float @add_shl_vgpr_const(i32 %a, i32 %b) {
+; VI-LABEL: add_shl_vgpr_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 9, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add_shl_vgpr_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_add_lshl_u32 v0, v0, v1, 9
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, %b
+  %result = shl i32 %x, 9
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @add_shl_vgpr_const_inline_const(i32 %a) {
+; VI-LABEL: add_shl_vgpr_const_inline_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, 9, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x7e800, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add_shl_vgpr_const_inline_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v1, 0x7e800
+; GFX9-NEXT: v_lshl_add_u32 v0, v0, 9, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, 1012
+  %result = shl i32 %x, 9
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; TODO: Non-optimal code generation because SelectionDAG combines
+; (shl (add x, CONST), y) ---> (add (shl x, y), CONST').
+; Here 3 << 9 = 0x600 is no longer an inline immediate, so GFX9 has to
+; materialize it with v_mov_b32 before v_lshl_add_u32 can use it.
+;
+define amdgpu_ps float @add_shl_vgpr_inline_const_x2(i32 %a) {
+; VI-LABEL: add_shl_vgpr_inline_const_x2:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, 9, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x600, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: add_shl_vgpr_inline_const_x2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v1, 0x600
+; GFX9-NEXT: v_lshl_add_u32 v0, v0, 9, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = add i32 %a, 3
+  %result = shl i32 %x, 9
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
Index: test/CodeGen/AMDGPU/and_or.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/and_or.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+
+; ===================================================================================
+; V_AND_OR_B32
+; ===================================================================================
+
+define amdgpu_ps float @and_or(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: and_or:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: and_or:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = and i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; ThreeOp instruction variant not used due to Constant Bus Limitations
+define amdgpu_ps float @and_or_vgpr_b(i32 inreg %a, i32 %b, i32 inreg %c) {
+; VI-LABEL: and_or_vgpr_b:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, s0, v0
+; VI-NEXT: v_or_b32_e32 v0, s1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: and_or_vgpr_b:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX9-NEXT: v_or_b32_e32 v0, s1, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = and i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @and_or_vgpr_ab(i32 %a, i32 %b, i32 inreg %c) {
+; VI-LABEL: and_or_vgpr_ab:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, s0, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: and_or_vgpr_ab:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_and_or_b32 v0, v0, v1, s0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = and i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
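+
+; ThreeOpFrag only counts uniform operands that are not inline immediates
+; against the constant bus, so the cases below still combine -- even with two
+; constants, as in v_and_or_b32 v0, v0, 4, 1, as long as both are inline.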
+
+define amdgpu_ps float @and_or_vgpr_const(i32 %a, i32 %b) {
+; VI-LABEL: and_or_vgpr_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, 4, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: and_or_vgpr_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_and_or_b32 v0, v0, 4, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = and i32 4, %a
+  %result = or i32 %x, %b
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @and_or_vgpr_const_inline_const(i32 %a) {
+; VI-LABEL: and_or_vgpr_const_inline_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, 20, v0
+; VI-NEXT: v_or_b32_e32 v0, 0x808, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: and_or_vgpr_const_inline_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v1, 0x808
+; GFX9-NEXT: v_and_or_b32 v0, v0, 20, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = and i32 20, %a
+  %result = or i32 %x, 2056
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @and_or_vgpr_inline_const_x2(i32 %a) {
+; VI-LABEL: and_or_vgpr_inline_const_x2:
+; VI: ; %bb.0:
+; VI-NEXT: v_and_b32_e32 v0, 4, v0
+; VI-NEXT: v_or_b32_e32 v0, 1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: and_or_vgpr_inline_const_x2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_and_or_b32 v0, v0, 4, 1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = and i32 4, %a
+  %result = or i32 %x, 1
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
Index: test/CodeGen/AMDGPU/or3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/or3.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+
+; ===================================================================================
+; V_OR3_B32
+; ===================================================================================
+
+define amdgpu_ps float @or3(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: or3:
+; VI: ; %bb.0:
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: or3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = or i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; ThreeOp instruction variant not used due to Constant Bus Limitations
+; TODO: with reassociation it is possible to replace a v_or_b32_e32 with an s_or_b32
+define amdgpu_ps float @or3_vgpr_a(i32 %a, i32 inreg %b, i32 inreg %c) {
+; VI-LABEL: or3_vgpr_a:
+; VI: ; %bb.0:
+; VI-NEXT: v_or_b32_e32 v0, s0, v0
+; VI-NEXT: v_or_b32_e32 v0, s1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: or3_vgpr_a:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
+; GFX9-NEXT: v_or_b32_e32 v0, s1, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = or i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
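+
+; A sketch of the reassociation mentioned in the TODO above (not current llc
+; output): s_or_b32 s0, s0, s1 followed by v_or_b32_e32 v0, s0, v0 would use
+; one VALU instruction instead of two and stay within the constant bus limit.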
+
+define amdgpu_ps float @or3_vgpr_all2(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: or3_vgpr_all2:
+; VI: ; %bb.0:
+; VI-NEXT: v_or_b32_e32 v1, v1, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: or3_vgpr_all2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_or3_b32 v0, v1, v2, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = or i32 %b, %c
+  %result = or i32 %a, %x
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @or3_vgpr_bc(i32 inreg %a, i32 %b, i32 %c) {
+; VI-LABEL: or3_vgpr_bc:
+; VI: ; %bb.0:
+; VI-NEXT: v_or_b32_e32 v0, s0, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: or3_vgpr_bc:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_or3_b32 v0, s0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = or i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @or3_vgpr_const(i32 %a, i32 %b) {
+; VI-LABEL: or3_vgpr_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_or_b32_e32 v0, v1, v0
+; VI-NEXT: v_or_b32_e32 v0, 64, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: or3_vgpr_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_or3_b32 v0, v1, v0, 64
+; GFX9-NEXT: ; return to shader part epilog
+  %x = or i32 64, %b
+  %result = or i32 %x, %a
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
Index: test/CodeGen/AMDGPU/shl_add.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/shl_add.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+
+; ===================================================================================
+; V_LSHL_ADD_U32
+; ===================================================================================
+
+define amdgpu_ps float @shl_add(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: shl_add:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_add:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_add_u32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+; ThreeOp instruction variant not used due to Constant Bus Limitations
+define amdgpu_ps float @shl_add_vgpr_a(i32 %a, i32 inreg %b, i32 inreg %c) {
+; VI-LABEL: shl_add_vgpr_a:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, s0, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_add_vgpr_a:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, s0, v0
+; GFX9-NEXT: v_add_u32_e32 v0, s1, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_add_vgpr_all(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: shl_add_vgpr_all:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_add_vgpr_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_add_u32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
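+
+; A single SGPR operand is within the constant bus limit: shl_add_vgpr_ab
+; below still selects v_lshl_add_u32 with s0 as src2, unlike shl_add_vgpr_a
+; above, which has two uniform inputs.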
+
+define amdgpu_ps float @shl_add_vgpr_ab(i32 %a, i32 %b, i32 inreg %c) {
+; VI-LABEL: shl_add_vgpr_ab:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_add_vgpr_ab:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_add_u32 v0, v0, v1, s0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = add i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_add_vgpr_const(i32 %a, i32 %b) {
+; VI-LABEL: shl_add_vgpr_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_add_vgpr_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_add_u32 v0, v0, 3, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, 3
+  %result = add i32 %x, %b
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
Index: test/CodeGen/AMDGPU/shl_or.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/shl_or.ll
@@ -0,0 +1,144 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=fiji -verify-machineinstrs | FileCheck -check-prefix=VI %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefix=GFX9 %s
+
+; ===================================================================================
+; V_LSHL_OR_B32
+; ===================================================================================
+
+define amdgpu_ps float @shl_or(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: shl_or:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_or_vgpr_c(i32 inreg %a, i32 inreg %b, i32 %c) {
+; VI-LABEL: shl_or_vgpr_c:
+; VI: ; %bb.0:
+; VI-NEXT: s_lshl_b32 s0, s0, s1
+; VI-NEXT: v_or_b32_e32 v0, s0, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_c:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_lshl_b32 s0, s0, s1
+; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_or_vgpr_all2(i32 %a, i32 %b, i32 %c) {
+; VI-LABEL: shl_or_vgpr_all2:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: v_or_b32_e32 v0, v2, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_all2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, v1, v2
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = or i32 %c, %x
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_or_vgpr_ac(i32 %a, i32 inreg %b, i32 %c) {
+; VI-LABEL: shl_or_vgpr_ac:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, s0, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_ac:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, s0, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = or i32 %x, %c
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_or_vgpr_const(i32 %a, i32 %b) {
+; VI-LABEL: shl_or_vgpr_const:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
+; VI-NEXT: v_or_b32_e32 v0, 6, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_const:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, v1, 6
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, %b
+  %result = or i32 %x, 6
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
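+
+; An SGPR combined with an inline immediate is still a single constant bus
+; read, so the scalar1/scalar2 cases below can fold both into v_lshl_or_b32.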
+
+define amdgpu_ps float @shl_or_vgpr_const2(i32 %a, i32 %b) {
+; VI-LABEL: shl_or_vgpr_const2:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, 6, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, 6, v1
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, 6
+  %result = or i32 %x, %b
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_or_vgpr_const_scalar1(i32 inreg %a, i32 %b) {
+; VI-LABEL: shl_or_vgpr_const_scalar1:
+; VI: ; %bb.0:
+; VI-NEXT: s_lshl_b32 s0, s0, 6
+; VI-NEXT: v_or_b32_e32 v0, s0, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_const_scalar1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, s0, 6, v0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, 6
+  %result = or i32 %x, %b
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
+
+define amdgpu_ps float @shl_or_vgpr_const_scalar2(i32 %a, i32 inreg %b) {
+; VI-LABEL: shl_or_vgpr_const_scalar2:
+; VI: ; %bb.0:
+; VI-NEXT: v_lshlrev_b32_e32 v0, 6, v0
+; VI-NEXT: v_or_b32_e32 v0, s0, v0
+; VI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: shl_or_vgpr_const_scalar2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, 6, s0
+; GFX9-NEXT: ; return to shader part epilog
+  %x = shl i32 %a, 6
+  %result = or i32 %x, %b
+  %bc = bitcast i32 %result to float
+  ret float %bc
+}
Index: test/CodeGen/AMDGPU/sibling-call.ll
===================================================================
--- test/CodeGen/AMDGPU/sibling-call.ll
+++ test/CodeGen/AMDGPU/sibling-call.ll
@@ -136,8 +136,7 @@
 ; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
-; GFX9: v_add_u32_e32 v0, v0, [[LOAD_0]]
-; GFX9: v_add_u32_e32 v0, v0, [[LOAD_1]]
+; GFX9: v_add3_u32 v0, v0, v3, v2
 ; GCN-NEXT: s_setpc_b64
 define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %large) #1 {