Index: llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td +++ llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td @@ -469,6 +469,6 @@ [(int_amdgcn_ubfe node:$src0, node:$src1, node:$src2), (AMDGPUbfe_u32_impl node:$src0, node:$src1, node:$src2)]>; -def AMDGPUfdot2 : PatFrags<(ops node:$src0, node:$src1, node:$src2, node:$src3), - [(int_amdgcn_fdot2 node:$src0, node:$src1, node:$src2, node:$src3), - (AMDGPUfdot2_impl node:$src0, node:$src1, node:$src2, node:$src3)]>; +def AMDGPUfdot2 : PatFrags<(ops node:$src0, node:$src1, node:$src2, node:$clamp), + [(int_amdgcn_fdot2 node:$src0, node:$src1, node:$src2, node:$clamp), + (AMDGPUfdot2_impl node:$src0, node:$src1, node:$src2, node:$clamp)]>; Index: llvm/lib/Target/AMDGPU/VOP3Instructions.td =================================================================== --- llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -32,18 +32,26 @@ ret1)); } -class getVOP3PModPat<VOPProfile P, SDPatternOperator Op> { +class getVOP3PModPat<VOPProfile P, SDPatternOperator Op, bit HasExplicitClamp = 0> { + dag src0_dag = (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)); + dag src1_dag = (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)); + dag src2_dag = (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers)); + dag clamp_dag = (i1 timm:$clamp); + list<dag> ret3 = [(set P.DstVT:$vdst, - (DivergentFragOrOp<Op, P>.ret (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)), - (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)), - (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers))))]; + !if(HasExplicitClamp, + (DivergentFragOrOp<Op, P>.ret src0_dag, src1_dag, src2_dag, clamp_dag), + (DivergentFragOrOp<Op, P>.ret src0_dag, src1_dag, src2_dag)))]; list<dag> ret2 = [(set P.DstVT:$vdst, - (DivergentFragOrOp<Op, P>.ret (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers)), - (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers))))]; + !if(HasExplicitClamp, + (DivergentFragOrOp<Op, P>.ret src0_dag, src1_dag, 
clamp_dag), + (DivergentFragOrOp<Op, P>.ret src0_dag, src1_dag)))]; list<dag> ret1 = [(set P.DstVT:$vdst, - (DivergentFragOrOp<Op, P>.ret (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers))))]; + !if(HasExplicitClamp, + (DivergentFragOrOp<Op, P>.ret src0_dag, clamp_dag), + (DivergentFragOrOp<Op, P>.ret src0_dag)))]; list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3, !if(!eq(P.NumSrcArgs, 2), ret2, Index: llvm/lib/Target/AMDGPU/VOP3PInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/VOP3PInstructions.td +++ llvm/lib/Target/AMDGPU/VOP3PInstructions.td @@ -10,9 +10,11 @@ // VOP3P Classes //===----------------------------------------------------------------------===// -class VOP3PInst<string OpName, VOPProfile P, SDPatternOperator node = null_frag> : +class VOP3PInst<string OpName, VOPProfile P, + SDPatternOperator node = null_frag, + bit HasExplicitClamp = 0> : VOP3P_Pseudo<OpName, P, - !if(P.HasModifiers, getVOP3PModPat<P, node>.ret, getVOP3Pat<P, node>.ret) + !if(P.HasModifiers, getVOP3PModPat<P, node, HasExplicitClamp>.ret, getVOP3Pat<P, node>.ret) >; // Non-packed instructions that use the VOP3P encoding. @@ -269,42 +271,30 @@ let IsDOT = 1 in { let SubtargetPredicate = HasDot2Insts in { -def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16", VOP3_Profile<VOP_F32_V2F16_V2F16_F32>>; -def V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16", VOP3_Profile<VOP_I32_V2I16_V2I16_I32>>; -def V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16", VOP3_Profile<VOP_I32_V2I16_V2I16_I32>>; -def V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>>; -def V_DOT8_U32_U4 : VOP3PInst<"v_dot8_u32_u4", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>>; +def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16", + VOP3_Profile<VOP_F32_V2F16_V2F16_F32>, + AMDGPUfdot2, 1/*ExplicitClamp*/>; +def V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16", + VOP3_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_sdot2, 1>; +def V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16", + VOP3_Profile<VOP_I32_V2I16_V2I16_I32>, int_amdgcn_udot2, 1>; +def V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8", + VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>; +def V_DOT8_U32_U4 : VOP3PInst<"v_dot8_u32_u4", + VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>; } // End SubtargetPredicate = HasDot2Insts let SubtargetPredicate = HasDot1Insts in { -def V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>>; -def V_DOT8_I32_I4 : VOP3PInst<"v_dot8_i32_i4", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>>; +def V_DOT4_I32_I8 : 
VOP3PInst<"v_dot4_i32_i8", + VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>; +def V_DOT8_I32_I4 : VOP3PInst<"v_dot8_i32_i4", + VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>; } // End SubtargetPredicate = HasDot1Insts } // End let IsDOT = 1 -multiclass DotPats<SDPatternOperator dot_op, VOP3P_Pseudo dot_inst> { - let SubtargetPredicate = dot_inst.SubtargetPredicate in - def : GCNPat < - (dot_op (dot_inst.Pfl.Src0VT (VOP3PMods dot_inst.Pfl.Src0VT:$src0, i32:$src0_modifiers)), - (dot_inst.Pfl.Src1VT (VOP3PMods dot_inst.Pfl.Src1VT:$src1, i32:$src1_modifiers)), - (dot_inst.Pfl.Src2VT (VOP3PMods dot_inst.Pfl.Src2VT:$src2, i32:$src2_modifiers)), timm:$clamp), - (dot_inst $src0_modifiers, VSrc_v2f16:$src0, - $src1_modifiers, VSrc_v2f16:$src1, - $src2_modifiers, VSrc_f32:$src2, timm:$clamp)>; -} - -defm : DotPats<AMDGPUfdot2, V_DOT2_F32_F16>; -defm : DotPats<int_amdgcn_sdot2, V_DOT2_I32_I16>; -defm : DotPats<int_amdgcn_udot2, V_DOT2_U32_U16>; -defm : DotPats<int_amdgcn_sdot4, V_DOT4_I32_I8>; -defm : DotPats<int_amdgcn_udot4, V_DOT4_U32_U8>; -defm : DotPats<int_amdgcn_sdot8, V_DOT8_I32_I4>; -defm : DotPats<int_amdgcn_udot8, V_DOT8_U32_U4>; - def : UDot2Pat<V_DOT2_U32_U16>; def : SDot2Pat<V_DOT2_I32_I16>; Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot2.ll @@ -0,0 +1,388 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx908 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX908 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s + +define i32 @v_sdot2(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_sdot2: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_clamp(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_sdot2_clamp: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 clamp +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_clamp: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 clamp +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_clamp: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 clamp +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %b, i32 %c, i1 true) + ret i32 %r +} + +define amdgpu_ps float @v_sdot2_sgpr_sgpr_sgpr(<2 x i16> inreg %a, <2 x i16> inreg %b, i32 inreg %c) { +; GFX906-LABEL: v_sdot2_sgpr_sgpr_sgpr: +; GFX906: ; %bb.0: +; GFX906-NEXT: v_mov_b32_e32 v0, s1 +; GFX906-NEXT: v_mov_b32_e32 v1, s2 +; GFX906-NEXT: v_dot2_i32_i16 v0, s0, v0, v1 +; GFX906-NEXT: ; return to shader part epilog +; +; GFX908-LABEL: v_sdot2_sgpr_sgpr_sgpr: +; GFX908: ; %bb.0: +; GFX908-NEXT: v_mov_b32_e32 v0, s1 +; GFX908-NEXT: v_mov_b32_e32 v1, s2 +; GFX908-NEXT: v_dot2_i32_i16 v0, s0, v0, v1 +; GFX908-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: v_sdot2_sgpr_sgpr_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v0, s2 +; GFX10-NEXT: ; implicit-def: 
$vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, s0, s1, v0 +; GFX10-NEXT: ; return to shader part epilog + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %b, i32 %c, i1 false) + %cast = bitcast i32 %r to float + ret float %cast +} + +define i32 @v_sdot2_inline_literal_a(<2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_sdot2_inline_literal_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: v_dot2_i32_i16 v0, s4, v0, v1 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_inline_literal_a: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: v_dot2_i32_i16 v0, s4, v0, v1 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_inline_literal_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, s4, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> , <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_inline_literal_b(<2 x i16> %a, i32 %c) { +; GFX906-LABEL: v_sdot2_inline_literal_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, s4, v1 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_inline_literal_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, s4, v1 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_inline_literal_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX10-NEXT: ; implicit-def: 
$vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, s4, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> , i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_inline_literal_a_b(<2 x i16> %a, i32 %c) { +; GFX906-LABEL: v_sdot2_inline_literal_a_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX906-NEXT: v_mov_b32_e32 v0, s4 +; GFX906-NEXT: v_dot2_i32_i16 v0, s5, v0, v1 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_inline_literal_a_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX908-NEXT: v_mov_b32_e32 v0, s4 +; GFX908-NEXT: v_dot2_i32_i16 v0, s5, v0, v1 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_inline_literal_a_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 8, 8 +; GFX10-NEXT: s_pack_ll_b32_b16 s5, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, s4, s5, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> , <2 x i16> , i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_inline_literal_a_b_c() { +; GFX906-LABEL: v_sdot2_inline_literal_a_b_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX906-NEXT: v_mov_b32_e32 v0, s4 +; GFX906-NEXT: v_dot2_i32_i16 v0, s5, v0, 8 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_inline_literal_a_b_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX908-NEXT: v_mov_b32_e32 v0, s4 +; 
GFX908-NEXT: v_dot2_i32_i16 v0, s5, v0, 8 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_inline_literal_a_b_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 8, 8 +; GFX10-NEXT: s_pack_ll_b32_b16 s5, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, s4, s5, 8 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> , <2 x i16> , i32 8, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_inline_literal_c(<2 x i16> %a, <2 x i16> %b) { +; GFX906-LABEL: v_sdot2_inline_literal_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, 7 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_inline_literal_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, 7 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_inline_literal_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, 7 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %b, i32 7, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_fneg_a(<2 x half> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_sdot2_fneg_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_fneg_a: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_fneg_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg <2 x half> %a + %cast.neg.a = bitcast <2 x half> %neg.a to <2 x i16> + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %cast.neg.a, <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_fneg_b(<2 x i16> %a, <2 x half> %b, i32 %c) { +; GFX906-LABEL: v_sdot2_fneg_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,1,0] neg_hi:[0,1,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_fneg_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,1,0] neg_hi:[0,1,0] +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_fneg_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,1,0] neg_hi:[0,1,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.b = fneg <2 x half> %b + %cast.neg.b = bitcast <2 x half> %neg.b to <2 x i16> + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %cast.neg.b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_fnegf32_c(<2 x i16> %a, <2 x i16> %b, float %c) { +; GFX906-LABEL: v_sdot2_fnegf32_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_fnegf32_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_fnegf32_c: +; GFX10: ; %bb.0: +; 
GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.c = fneg float %c + %cast.neg.c = bitcast float %neg.c to i32 + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %b, i32 %cast.neg.c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_fnegv2f16_c(<2 x i16> %a, <2 x i16> %b, <2 x half> %c) { +; GFX906-LABEL: v_sdot2_fnegv2f16_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_fnegv2f16_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_fnegv2f16_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.c = fneg <2 x half> %c + %cast.neg.c = bitcast <2 x half> %neg.c to i32 + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %b, i32 %cast.neg.c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_shuffle10_a(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_sdot2_shuffle10_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_alignbit_b32 v0, v0, v0, 16 +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_shuffle10_a: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_alignbit_b32 v0, v0, v0, 16 +; GFX908-NEXT: 
v_dot2_i32_i16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_shuffle10_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_alignbit_b32 v0, v0, v0, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %shuf.a = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32> + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %shuf.a, <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot2_shuffle10_b(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_sdot2_shuffle10_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_alignbit_b32 v1, v1, v1, 16 +; GFX906-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_sdot2_shuffle10_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_alignbit_b32 v1, v1, v1, 16 +; GFX908-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot2_shuffle10_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_alignbit_b32 v1, v1, v1, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_i32_i16 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %shuf.b = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> + %r = call i32 @llvm.amdgcn.sdot2(<2 x i16> %a, <2 x i16> %shuf.b, i32 %c, i1 false) + ret i32 %r +} + +declare i32 @llvm.amdgcn.sdot2(<2 x i16>, <2 x i16>, i32, i1 immarg) #0 + +attributes #0 = { nounwind readnone speculatable } Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot4.ll @@ -0,0 +1,141 @@ +; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s + +define i32 @v_sdot4(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot4: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot4: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot4(i32 %a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot4_clamp(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot4_clamp: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 clamp +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot4_clamp: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 clamp +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot4(i32 %a, i32 %b, i32 %c, i1 true) + ret i32 %r +} + +; FIXME: bitcast should not expand +define i32 @v_sdot4_cast_v4i8(<4 x i8> %a, <4 x i8> %b, i32 %c) { +; GFX906-LABEL: v_sdot4_cast_v4i8: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_movk_i32 s4, 0xff +; GFX906-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_and_b32_e32 v1, s4, v3 +; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_bfe_u32 v0, v0, 0, 16 +; GFX906-NEXT: v_bfe_u32 v1, v1, 0, 16 +; GFX906-NEXT: v_and_b32_e32 v2, s4, v7 +; GFX906-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; GFX906-NEXT: v_and_b32_e32 v1, s4, v5 +; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX906-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_bfe_u32 v1, v1, 0, 16 +; GFX906-NEXT: v_bfe_u32 v2, v2, 0, 16 +; GFX906-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX906-NEXT: v_dot4_i32_i8 v0, v0, v1, v8 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot4_cast_v4i8: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_and_b32_sdwa v3, v3, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_and_b32_sdwa v5, v5, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_and_b32_sdwa v7, v7, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v2, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v3, v6, v7 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 16 +; GFX10-NEXT: v_bfe_u32 v1, v1, 0, 16 +; GFX10-NEXT: v_bfe_u32 v2, v2, 0, 16 +; GFX10-NEXT: v_bfe_u32 v3, v3, 0, 16 +; GFX10-NEXT: v_lshl_or_b32 v7, v1, 16, v0 +; GFX10-NEXT: v_lshl_or_b32 v1, v3, 16, v2 +; GFX10-NEXT: v_dot4_i32_i8 v0, v7, v1, v8 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %a.cast = bitcast <4 x i8> %a to i32 + %b.cast = bitcast <4 x i8> %b to i32 + %r = call i32 @llvm.amdgcn.sdot4(i32 %a.cast, i32 %b.cast, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot4_fnegf32_a(float %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot4_fnegf32_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot4_fnegf32_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg float %a + %cast.neg.a = bitcast float %neg.a to i32 + %r = call i32 @llvm.amdgcn.sdot4(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot4_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot4_fnegv2f16_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX906-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot4_fnegv2f16_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot4_i32_i8 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg <2 x half> %a + %cast.neg.a = bitcast <2 x half> 
%neg.a to i32 + %r = call i32 @llvm.amdgcn.sdot4(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +declare i32 @llvm.amdgcn.sdot4(i32, i32, i32, i1 immarg) #0 + +attributes #0 = { nounwind readnone speculatable } Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sdot8.ll @@ -0,0 +1,94 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s + +define i32 @v_sdot8(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot8: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot8: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.sdot8(i32 %a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot8_clamp(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot8_clamp: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 clamp +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot8_clamp: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 clamp +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + 
%r = call i32 @llvm.amdgcn.sdot8(i32 %a, i32 %b, i32 %c, i1 true) + ret i32 %r +} + +; FIXME: Fix argument do not let these casts expand +; define i32 @v_sdot8_cast_v8i4(<8 x i4> %a, <8 x i4> %b, i32 %c) { +; %a.cast = bitcast <8 x i4> %a to i32 +; %b.cast = bitcast <8 x i4> %b to i32 +; %r = call i32 @llvm.amdgcn.sdot8(i32 %a.cast, i32 %b.cast, i32 %c, i1 false) +; ret i32 %r +; } + +define i32 @v_sdot8_fnegf32_a(float %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot8_fnegf32_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot8_fnegf32_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg float %a + %cast.neg.a = bitcast float %neg.a to i32 + %r = call i32 @llvm.amdgcn.sdot8(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_sdot8_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_sdot8_fnegv2f16_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX906-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sdot8_fnegv2f16_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot8_i32_i4 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg <2 x half> %a + %cast.neg.a = bitcast <2 x half> %neg.a to i32 + %r = call i32 @llvm.amdgcn.sdot8(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +declare i32 @llvm.amdgcn.sdot8(i32, i32, i32, i1 
immarg) #0 + +attributes #0 = { nounwind readnone speculatable } Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot2.ll @@ -0,0 +1,388 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx908 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX908 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s + +define i32 @v_udot2(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_udot2: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_clamp(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_udot2_clamp: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 clamp +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_clamp: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 clamp +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_clamp: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 clamp +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %b, i32 %c, i1 true) + ret i32 %r +} + +define amdgpu_ps float @v_udot2_sgpr_sgpr_sgpr(<2 x i16> inreg %a, <2 x i16> inreg %b, i32 inreg %c) { +; GFX906-LABEL: v_udot2_sgpr_sgpr_sgpr: +; GFX906: ; %bb.0: +; GFX906-NEXT: v_mov_b32_e32 v0, s1 +; GFX906-NEXT: v_mov_b32_e32 v1, s2 +; GFX906-NEXT: v_dot2_u32_u16 v0, s0, v0, v1 +; GFX906-NEXT: ; return to shader part epilog +; +; GFX908-LABEL: v_udot2_sgpr_sgpr_sgpr: +; GFX908: ; %bb.0: +; GFX908-NEXT: v_mov_b32_e32 v0, s1 +; GFX908-NEXT: v_mov_b32_e32 v1, s2 +; GFX908-NEXT: v_dot2_u32_u16 v0, s0, v0, v1 +; GFX908-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: v_udot2_sgpr_sgpr_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v0, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, s0, s1, v0 +; GFX10-NEXT: ; return to shader part epilog + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %b, i32 %c, i1 false) + %cast = bitcast i32 %r to float + ret float %cast +} + +define i32 @v_udot2_inline_literal_a(<2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_udot2_inline_literal_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: v_dot2_u32_u16 v0, s4, v0, v1 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_inline_literal_a: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: v_dot2_u32_u16 v0, s4, v0, v1 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: 
v_udot2_inline_literal_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, s4, v0, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> , <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_inline_literal_b(<2 x i16> %a, i32 %c) { +; GFX906-LABEL: v_udot2_inline_literal_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, s4, v1 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_inline_literal_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, s4, v1 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_inline_literal_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, s4, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> , i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_inline_literal_a_b(<2 x i16> %a, i32 %c) { +; GFX906-LABEL: v_udot2_inline_literal_a_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX906-NEXT: v_mov_b32_e32 v0, s4 +; GFX906-NEXT: v_dot2_u32_u16 v0, s5, v0, v1 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_inline_literal_a_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX908-NEXT: 
v_mov_b32_e32 v0, s4 +; GFX908-NEXT: v_dot2_u32_u16 v0, s5, v0, v1 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_inline_literal_a_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 8, 8 +; GFX10-NEXT: s_pack_ll_b32_b16 s5, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, s4, s5, v1 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> , <2 x i16> , i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_inline_literal_a_b_c() { +; GFX906-LABEL: v_udot2_inline_literal_a_b_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX906-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX906-NEXT: v_mov_b32_e32 v0, s4 +; GFX906-NEXT: v_dot2_u32_u16 v0, s5, v0, 8 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_inline_literal_a_b_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: s_pack_ll_b32_b16 s4, 4, 4 +; GFX908-NEXT: s_pack_ll_b32_b16 s5, 8, 8 +; GFX908-NEXT: v_mov_b32_e32 v0, s4 +; GFX908-NEXT: v_dot2_u32_u16 v0, s5, v0, 8 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_inline_literal_a_b_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 8, 8 +; GFX10-NEXT: s_pack_ll_b32_b16 s5, 4, 4 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, s4, s5, 8 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> , <2 x i16> , i32 8, i1 false) + ret i32 %r +} + +define i32 @v_udot2_inline_literal_c(<2 x i16> %a, <2 x i16> %b) { +; GFX906-LABEL: v_udot2_inline_literal_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, 7 +; GFX906-NEXT: s_setpc_b64 s[30:31] 
+; +; GFX908-LABEL: v_udot2_inline_literal_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, 7 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_inline_literal_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, 7 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %b, i32 7, i1 false) + ret i32 %r +} + +define i32 @v_udot2_fneg_a(<2 x half> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_udot2_fneg_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_fneg_a: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_fneg_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg <2 x half> %a + %cast.neg.a = bitcast <2 x half> %neg.a to <2 x i16> + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %cast.neg.a, <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_fneg_b(<2 x i16> %a, <2 x half> %b, i32 %c) { +; GFX906-LABEL: v_udot2_fneg_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,1,0] neg_hi:[0,1,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_fneg_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,1,0] neg_hi:[0,1,0] +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_fneg_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,1,0] neg_hi:[0,1,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.b = fneg <2 x half> %b + %cast.neg.b = bitcast <2 x half> %neg.b to <2 x i16> + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %cast.neg.b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_fnegf32_c(<2 x i16> %a, <2 x i16> %b, float %c) { +; GFX906-LABEL: v_udot2_fnegf32_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_fnegf32_c: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_fnegf32_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.c = fneg float %c + %cast.neg.c = bitcast float %neg.c to i32 + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %b, i32 %cast.neg.c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_fnegv2f16_c(<2 x i16> %a, <2 x i16> %b, <2 x half> %c) { +; GFX906-LABEL: v_udot2_fnegv2f16_c: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_fnegv2f16_c: +; GFX908: ; %bb.0: +; 
GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_fnegv2f16_c: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.c = fneg <2 x half> %c + %cast.neg.c = bitcast <2 x half> %neg.c to i32 + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %b, i32 %cast.neg.c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_shuffle10_a(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_udot2_shuffle10_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_alignbit_b32 v0, v0, v0, 16 +; GFX906-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_shuffle10_a: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_alignbit_b32 v0, v0, v0, 16 +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_shuffle10_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_alignbit_b32 v0, v0, v0, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %shuf.a = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32> + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %shuf.a, <2 x i16> %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot2_shuffle10_b(<2 x i16> %a, <2 x i16> %b, i32 %c) { +; GFX906-LABEL: v_udot2_shuffle10_b: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_alignbit_b32 v1, v1, v1, 16 +; GFX906-NEXT: 
v_dot2_u32_u16 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX908-LABEL: v_udot2_shuffle10_b: +; GFX908: ; %bb.0: +; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX908-NEXT: v_alignbit_b32 v1, v1, v1, 16 +; GFX908-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX908-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot2_shuffle10_b: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_alignbit_b32 v1, v1, v1, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot2_u32_u16 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %shuf.b = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> + %r = call i32 @llvm.amdgcn.udot2(<2 x i16> %a, <2 x i16> %shuf.b, i32 %c, i1 false) + ret i32 %r +} + +declare i32 @llvm.amdgcn.udot2(<2 x i16>, <2 x i16>, i32, i1 immarg) #0 + +attributes #0 = { nounwind readnone speculatable } Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot4.ll @@ -0,0 +1,141 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s + +define i32 @v_udot4(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot4: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot4: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: 
v_dot4_u32_u8 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot4(i32 %a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot4_clamp(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot4_clamp: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 clamp +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot4_clamp: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 clamp +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot4(i32 %a, i32 %b, i32 %c, i1 true) + ret i32 %r +} + +; FIXME: bitcast should not expand +define i32 @v_udot4_cast_v4i8(<4 x i8> %a, <4 x i8> %b, i32 %c) { +; GFX906-LABEL: v_udot4_cast_v4i8: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: s_movk_i32 s4, 0xff +; GFX906-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX906-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_and_b32_e32 v1, s4, v3 +; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX906-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_bfe_u32 v0, v0, 0, 16 +; GFX906-NEXT: v_bfe_u32 v1, v1, 0, 16 +; GFX906-NEXT: v_and_b32_e32 v2, s4, v7 +; GFX906-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; GFX906-NEXT: v_and_b32_e32 v1, s4, v5 +; GFX906-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX906-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX906-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX906-NEXT: v_bfe_u32 v1, v1, 0, 16 +; 
GFX906-NEXT: v_bfe_u32 v2, v2, 0, 16 +; GFX906-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX906-NEXT: v_dot4_u32_u8 v0, v0, v1, v8 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot4_cast_v4i8: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_and_b32_sdwa v3, v3, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_and_b32_sdwa v5, v5, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_and_b32_sdwa v7, v7, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v2, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_or_b32_sdwa v3, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 16 +; GFX10-NEXT: v_bfe_u32 v1, v1, 0, 16 +; GFX10-NEXT: v_bfe_u32 v2, v2, 0, 16 +; GFX10-NEXT: v_bfe_u32 v3, v3, 0, 16 +; GFX10-NEXT: v_lshl_or_b32 v7, v1, 16, v0 +; GFX10-NEXT: v_lshl_or_b32 v1, v3, 16, v2 +; GFX10-NEXT: v_dot4_u32_u8 v0, v7, v1, v8 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %a.cast = bitcast <4 x i8> %a to i32 + %b.cast = bitcast <4 x i8> %b to i32 + %r = call i32 @llvm.amdgcn.udot4(i32 %a.cast, i32 %b.cast, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot4_fnegf32_a(float %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot4_fnegf32_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; 
GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot4_fnegf32_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg float %a + %cast.neg.a = bitcast float %neg.a to i32 + %r = call i32 @llvm.amdgcn.udot4(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot4_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot4_fnegv2f16_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX906-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot4_fnegv2f16_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot4_u32_u8 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg <2 x half> %a + %cast.neg.a = bitcast <2 x half> %neg.a to i32 + %r = call i32 @llvm.amdgcn.udot4(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +declare i32 @llvm.amdgcn.udot4(i32, i32, i32, i1 immarg) #0 + +attributes #0 = { nounwind readnone speculatable } Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.udot8.ll @@ -0,0 +1,94 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s +; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s +; RUN: llc -global-isel 
-march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s + +define i32 @v_udot8(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot8: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot8: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot8(i32 %a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot8_clamp(i32 %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot8_clamp: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 clamp +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot8_clamp: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 clamp +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %r = call i32 @llvm.amdgcn.udot8(i32 %a, i32 %b, i32 %c, i1 true) + ret i32 %r +} + +; FIXME: Fix argument do not let these casts expand +; define i32 @v_udot8_cast_v8i4(<8 x i4> %a, <8 x i4> %b, i32 %c) { +; %a.cast = bitcast <8 x i4> %a to i32 +; %b.cast = bitcast <8 x i4> %b to i32 +; %r = call i32 @llvm.amdgcn.udot8(i32 %a.cast, i32 %b.cast, i32 %c, i1 false) +; ret i32 %r +; } + +define i32 @v_udot8_fnegf32_a(float %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot8_fnegf32_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot8_fnegf32_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: 
s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 neg_lo:[1,0,0] neg_hi:[1,0,0] +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg float %a + %cast.neg.a = bitcast float %neg.a to i32 + %r = call i32 @llvm.amdgcn.udot8(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +define i32 @v_udot8_fnegv2f16_a(<2 x half> %a, i32 %b, i32 %c) { +; GFX906-LABEL: v_udot8_fnegv2f16_a: +; GFX906: ; %bb.0: +; GFX906-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX906-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX906-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 +; GFX906-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_udot8_fnegv2f16_a: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi +; GFX10-NEXT: v_dot8_u32_u4 v0, v0, v1, v2 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %neg.a = fneg <2 x half> %a + %cast.neg.a = bitcast <2 x half> %neg.a to i32 + %r = call i32 @llvm.amdgcn.udot8(i32 %cast.neg.a, i32 %b, i32 %c, i1 false) + ret i32 %r +} + +declare i32 @llvm.amdgcn.udot8(i32, i32, i32, i1 immarg) #0 + +attributes #0 = { nounwind readnone speculatable }