Index: llvm/trunk/lib/Target/AMDGPU/SOPInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SOPInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SOPInstructions.td
@@ -442,19 +442,19 @@
   [(set i32:$sdst, (UniformBinFrag<shl> i32:$src0, i32:$src1))]
 >;
 def S_LSHL_B64 : SOP2_64_32 <"s_lshl_b64",
-  [(set i64:$sdst, (shl i64:$src0, i32:$src1))]
+  [(set i64:$sdst, (UniformBinFrag<shl> i64:$src0, i32:$src1))]
 >;
 def S_LSHR_B32 : SOP2_32 <"s_lshr_b32",
   [(set i32:$sdst, (UniformBinFrag<srl> i32:$src0, i32:$src1))]
 >;
 def S_LSHR_B64 : SOP2_64_32 <"s_lshr_b64",
-  [(set i64:$sdst, (srl i64:$src0, i32:$src1))]
+  [(set i64:$sdst, (UniformBinFrag<srl> i64:$src0, i32:$src1))]
 >;
 def S_ASHR_I32 : SOP2_32 <"s_ashr_i32",
   [(set i32:$sdst, (UniformBinFrag<sra> i32:$src0, i32:$src1))]
 >;
 def S_ASHR_I64 : SOP2_64_32 <"s_ashr_i64",
-  [(set i64:$sdst, (sra i64:$src0, i32:$src1))]
+  [(set i64:$sdst, (UniformBinFrag<sra> i64:$src0, i32:$src1))]
 >;
 } // End Defs = [SCC]
 
Index: llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
+++ llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
@@ -17,16 +17,16 @@
     (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp));
 
   list<dag> ret3 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT src0),
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
           (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
           (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))];
 
   list<dag> ret2 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT src0),
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
           (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))];
 
   list<dag> ret1 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT src0)))];
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0)))];
 
   list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                   !if(!eq(P.NumSrcArgs, 2), ret2,
@@ -35,18 +35,18 @@
 
 class getVOP3PModPat<VOPProfile P, SDPatternOperator node> {
   list<dag> ret3 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT !if(P.HasClamp, (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
                                    (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers))),
           (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)),
           (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers))))];
 
   list<dag> ret2 = [(set P.DstVT:$vdst,
-    (node !if(P.HasClamp, (P.Src0VT (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
+    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
                           (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers))),
           (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers))))];
 
   list<dag> ret1 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];
 
   list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                   !if(!eq(P.NumSrcArgs, 2), ret2,
@@ -55,18 +55,18 @@
 
 class getVOP3OpSelPat<VOPProfile P, SDPatternOperator node> {
   list<dag> ret3 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT !if(P.HasClamp, (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
                                    (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))),
           (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers)),
           (P.Src2VT (VOP3OpSel P.Src2VT:$src2, i32:$src2_modifiers))))];
 
   list<dag> ret2 = [(set P.DstVT:$vdst,
-    (node !if(P.HasClamp, (P.Src0VT (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
+    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
                           (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))),
           (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers))))];
 
   list<dag> ret1 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];
 
   list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                   !if(!eq(P.NumSrcArgs, 2), ret2,
@@ -75,18 +75,18 @@
 
 class getVOP3OpSelModPat<VOPProfile P, SDPatternOperator node> {
   list<dag> ret3 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT !if(P.HasClamp, (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
                                    (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))),
           (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers)),
           (P.Src2VT (VOP3OpSelMods P.Src2VT:$src2, i32:$src2_modifiers))))];
 
   list<dag> ret2 = [(set P.DstVT:$vdst,
-    (node !if(P.HasClamp, (P.Src0VT (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
+    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
                           (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))),
           (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers))))];
 
   list<dag> ret1 = [(set P.DstVT:$vdst,
-    (node (P.Src0VT (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];
+    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];
 
   list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                   !if(!eq(P.NumSrcArgs, 2), ret2,
@@ -94,9 +94,9 @@
 }
 
 class getVOP3Pat<VOPProfile P, SDPatternOperator node> {
-  list<dag> ret3 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2))];
-  list<dag> ret2 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))];
-  list<dag> ret1 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0))];
+  list<dag> ret3 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2))];
+  list<dag> ret2 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1))];
+  list<dag> ret1 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0))];
   list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                   !if(!eq(P.NumSrcArgs, 2), ret2,
                   ret1));
@@ -185,6 +185,7 @@
                       getAsm64<HasDst, NumSrcArgs, HasIntClamp,
                                HasModifiers, HasOMod, DstVT>.ret,
                       P.Asm64));
+  let NeedPatGen = P.NeedPatGen;
 }
 
 class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> {
@@ -381,12 +382,12 @@
 
 let SchedRW = [Write64Bit] in {
 // These instructions only exist on SI and CI
-let SubtargetPredicate = isSICI in {
-def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_I64_I64_I32>>;
-def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_I64_I64_I32>>;
-def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>>;
+let SubtargetPredicate = isSICI, Predicates = [isSICI] in {
+def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, shl>;
+def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, srl>;
+def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, sra>;
 def V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
-} // End SubtargetPredicate = isSICI
+} // End SubtargetPredicate = isSICI, Predicates = [isSICI]
 
 let SubtargetPredicate = isVI in {
 def V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>>;
@@ -395,6 +396,22 @@
 } // End SubtargetPredicate = isVI
 } // End SchedRW = [Write64Bit]
 
+let Predicates = [isVI] in {
+def : GCNPat <
+ (getDivergentFrag<shl>.ret i64:$x, i32:$y),
+ (V_LSHLREV_B64 $y, $x)
+>;
+def : AMDGPUPat <
+ (getDivergentFrag<srl>.ret i64:$x, i32:$y),
+ (V_LSHRREV_B64 $y, $x)
+>;
+def : AMDGPUPat <
+ (getDivergentFrag<sra>.ret i64:$x, i32:$y),
+ (V_ASHRREV_I64 $y, $x)
+>;
+}
+
+
 let SubtargetPredicate = isCIVI in {
 
 let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in {
Index: llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td
@@ -572,6 +572,11 @@
   list<dag> ret = !if(!ne(P.NeedPatGen,PatGenMode.NoPattern), VOPPatGen<Op, P>.ret, []);
 }
 
+class DivergentFragOrOp<SDPatternOperator Op, VOPProfile P> {
+  SDPatternOperator ret = !if(!eq(P.NeedPatGen,PatGenMode.Pattern),
+   !if(!isa<SDNode>(Op), getDivergentFrag<Op>.ret, Op), Op);
+}
+
 include "VOPCInstructions.td"
 include "VOP1Instructions.td"
 include "VOP2Instructions.td"
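
Context note (not part of the patch): the fragments used above gate pattern matching on SelectionDAG divergence, so the same node (shl, srl, sra) selects the SALU instruction when its result is uniform and the VALU instruction when it is divergent. A minimal sketch of the idea, assuming definitions of roughly this shape in AMDGPUInstructions.td (the names match the backend's; the exact bodies here are illustrative):

// Sketch only: binary-op fragments predicated on DAG divergence.
class UniformBinFrag<SDPatternOperator Op> : PatFrag <
  (ops node:$src0, node:$src1),
  (Op $src0, $src1),
  [{ return !N->isDivergent(); }]  // match only uniform nodes -> SALU
>;

class DivergentBinFrag<SDPatternOperator Op> : PatFrag <
  (ops node:$src0, node:$src1),
  (Op $src0, $src1),
  [{ return N->isDivergent(); }]   // match only divergent nodes -> VALU
>;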
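
Two details in the output patterns are worth noting. The VI-era "REV" opcodes take the shift amount in src0, which is why the result dags swap the operands: (V_LSHLREV_B64 $y, $x) shifts $x left by $y. And DivergentFragOrOp only wraps the operator in a divergent fragment when it is a plain SDNode (!isa<SDNode>(Op)); a PatFrag passed as the operator is left alone, since it already carries its own predicate. The added Predicates = [isSICI] alongside SubtargetPredicate presumably keeps the patterns auto-generated via VOP_PAT_GEN guarded at the pattern level as well, not just on the instruction definitions.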