Index: lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.cpp
+++ lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1244,7 +1244,7 @@
     }
   }
 
-  const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::dst);
+  const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::vdst);
   const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
   const MachineOperand *Src1 = getNamedOperand(*MI, AMDGPU::OpName::src1);
   const MachineOperand *Src2 = getNamedOperand(*MI, AMDGPU::OpName::src2);
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -1089,6 +1089,10 @@
                  bit HasModifiers> {
 
   dag ret =
+    !if (!eq(NumSrcArgs, 0),
+      // VOP1 without input operands (V_NOP, V_CLREXCP)
+      (ins),
+    /* else */
     !if (!eq(NumSrcArgs, 1),
       !if (!eq(HasModifiers, 1),
         // VOP1 with modifiers
@@ -1118,14 +1122,14 @@
       /* else */,
         // VOP3 without modifiers
         (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2)
-      /* endif */ )));
+      /* endif */ ))));
 }
 
 // Returns the assembly string for the inputs and outputs of a VOP[12C]
 // instruction. This does not add the _e32 suffix, so it can be reused
 // by getAsm64.
 class getAsm32 <int NumSrcArgs> {
-  string dst = "$dst";
+  string dst = "$vdst";
   string src0 = ", $src0";
   string src1 = ", $src1";
   string src2 = ", $src2";
@@ -1146,7 +1150,7 @@
   string ret =
     !if(!eq(HasModifiers, 0),
         getAsm32<NumSrcArgs>.ret,
-        "$dst, "#src0#src1#src2#"$clamp"#"$omod");
+        "$vdst, "#src0#src1#src2#"$clamp"#"$omod");
 }
 
 class VOPProfile <list<ValueType> _ArgVT> {
@@ -1169,7 +1173,7 @@
   field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
   field bit HasModifiers = hasModifiers<Src0VT>.ret;
 
-  field dag Outs = !if(HasDst,(outs DstRC:$dst),(outs));
+  field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));
 
   // VOP3b instructions are a special case with a second explicit
   // output. This is manually overridden for them.
@@ -1216,10 +1220,10 @@
 
 // Write out to vcc or arbitrary SGPR.
 def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped]> {
-  let Asm32 = "$dst, vcc, $src0, $src1";
-  let Asm64 = "$dst, $sdst, $src0, $src1";
-  let Outs32 = (outs DstRC:$dst);
-  let Outs64 = (outs DstRC:$dst, SReg_64:$sdst);
+  let Asm32 = "$vdst, vcc, $src0, $src1";
+  let Asm64 = "$vdst, $sdst, $src0, $src1";
+  let Outs32 = (outs DstRC:$vdst);
+  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
 }
 
 // Write out to vcc or arbitrary SGPR and read in from vcc or
@@ -1231,10 +1235,10 @@
   // restriction. SGPRs are still allowed because it should
   // technically be possible to use VCC again as src0.
   let Src0RC32 = VCSrc_32;
-  let Asm32 = "$dst, vcc, $src0, $src1, vcc";
-  let Asm64 = "$dst, $sdst, $src0, $src1, $src2";
-  let Outs32 = (outs DstRC:$dst);
-  let Outs64 = (outs DstRC:$dst, SReg_64:$sdst);
+  let Asm32 = "$vdst, vcc, $src0, $src1, vcc";
+  let Asm64 = "$vdst, $sdst, $src0, $src1, $src2";
+  let Outs32 = (outs DstRC:$vdst);
+  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
 
   // Suppress src2 implied by type since the 32-bit encoding uses an
   // implicit VCC use.
@@ -1267,7 +1271,7 @@
 
 class VOPC_Class_Profile<ValueType vt> : VOPC_Profile<vt, i32> {
   let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
-  let Asm64 = "$dst, $src0_modifiers, $src1";
+  let Asm64 = "$vdst, $src0_modifiers, $src1";
 }
 
 def VOPC_I1_F32_F32 : VOPC_Profile<f32>;
@@ -1284,13 +1288,13 @@
 def VOP_CNDMASK : VOPProfile <[i32, i32, i32, untyped]> {
   let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
   let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, SSrc_64:$src2);
-  let Asm64 = "$dst, $src0, $src1, $src2";
+  let Asm64 = "$vdst, $src0, $src1, $src2";
 }
 
 def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
 def VOP_MADK : VOPProfile <[f32, f32, f32, f32]> {
   field dag Ins = (ins VCSrc_32:$src0, VGPR_32:$vsrc1, u32imm:$src2);
-  field string Asm = "$dst, $src0, $vsrc1, $src2";
+  field string Asm = "$vdst, $src0, $vsrc1, $src2";
 }
 def VOP_MAC : VOPProfile <[f32, f32, f32, f32]> {
   let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2);
@@ -1313,13 +1317,13 @@
     !if (p.HasDst32,
       !if (!eq(p.NumSrcArgs, 0),
         // 1 dst, 0 src
-        (inst p.DstRC:$dst),
+        (inst p.DstRC:$vdst),
         !if (!eq(p.NumSrcArgs, 1),
           // 1 dst, 1 src
-          (inst p.DstRC:$dst, p.Src0RC32:$src0),
+          (inst p.DstRC:$vdst, p.Src0RC32:$src0),
           !if (!eq(p.NumSrcArgs, 2),
             // 1 dst, 2 src
-            (inst p.DstRC:$dst, p.Src0RC32:$src0, p.Src1RC32:$src1),
+            (inst p.DstRC:$vdst, p.Src0RC32:$src0, p.Src1RC32:$src1),
             // else - unreachable
             (inst)))),
     // else
@@ -1647,9 +1651,9 @@
                      SDPatternOperator node = null_frag> : VOP1_Helper <
   op, opName, P, [],
   !if(P.HasModifiers,
-      [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
+      [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                                  i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0))])
+      [(set P.DstVT:$vdst, (node P.Src0VT:$src0))])
 >;
 
 multiclass VOP1InstSI <vop1 op, string opName, VOPProfile P,
                        SDPatternOperator node = null_frag> {
@@ -1679,11 +1683,11 @@
                      string revOp = opName> : VOP2_Helper <
   op, opName, P, [],
   !if(P.HasModifiers,
-      [(set P.DstVT:$dst,
+      [(set P.DstVT:$vdst,
            (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                             i32:$src0_modifiers, i1:$clamp, i32:$omod)),
                  (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+      [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
   revOp
 >;
@@ -1695,11 +1699,11 @@
 
   defm _e64 : VOP3SI_2_m <op, opName, P.Outs, P.Ins64, P.Asm64,
     !if(P.HasModifiers,
-        [(set P.DstVT:$dst,
+        [(set P.DstVT:$vdst,
             (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                              i32:$src0_modifiers, i1:$clamp, i32:$omod)),
                   (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-        [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+        [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
     opName, revOp, P.HasModifiers>;
 }
@@ -1722,11 +1726,11 @@
                       string revOp = opName> : VOP2b_Helper <
   op, opName, P, [],
   !if(P.HasModifiers,
-      [(set P.DstVT:$dst,
+      [(set P.DstVT:$vdst,
           (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                            i32:$src0_modifiers, i1:$clamp, i32:$omod)),
                 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+      [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
   revOp, !eq(P.NumSrcArgs, 3)
 >;
@@ -1746,11 +1750,11 @@
                           : VOP2_VI3_Helper <
   op, opName, P, [],
   !if(P.HasModifiers,
-      [(set P.DstVT:$dst,
+      [(set P.DstVT:$vdst,
          (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                           i32:$src0_modifiers, i1:$clamp, i32:$omod)),
                (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+      [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
   revOp
 >;
@@ -1823,7 +1827,7 @@
   defm _e32 : VOPC_m <op, opName, p, pat32, DefExec, revOp, sched>;
 
-  defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$dst), p.Ins64, opName#p.Asm64,
+  defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$vdst), p.Ins64, opName#p.Asm64,
                         pat64, opName, p.HasModifiers, DefExec, revOp, sched>;
 }
@@ -1834,7 +1838,7 @@
                               VOPProfile p, list<SchedReadWrite> sched> {
   defm _e32 : VOPC_m <op, opName, p, pat32, DefExec, opName, sched>;
 
-  defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$dst), p.Ins64, opName#p.Asm64,
+  defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$vdst), p.Ins64, opName#p.Asm64,
                         pat64, opName, p.HasModifiers, DefExec, opName, sched>,
                         VOP3DisableModFields<1, 0, 0>;
 }
@@ -1847,12 +1851,12 @@
   VOPC_Helper <
   op, opName, [],
   !if(P.HasModifiers,
-      [(set i1:$dst,
+      [(set i1:$vdst,
           (setcc (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                  i32:$src0_modifiers, i1:$clamp, i32:$omod)),
                 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
                 cond))],
-      [(set i1:$dst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]),
+      [(set i1:$vdst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]),
   DefExec, revOp, P, sched
 >;
@@ -1861,9 +1865,9 @@
                            list<SchedReadWrite> sched> : VOPC_Class_Helper <
   op, opName, [],
   !if(P.HasModifiers,
-      [(set i1:$dst,
+      [(set i1:$vdst,
          (AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0,
                           i32:$src0_modifiers)), P.Src1VT:$src1))],
-      [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
+      [(set i1:$vdst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
   DefExec, opName, P, sched
 >;
@@ -1918,29 +1922,29 @@
 multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
                      SDPatternOperator node = null_frag> : VOP3_Helper <
-  op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
+  op, opName, (outs P.DstRC.RegClass:$vdst), P.Ins64, P.Asm64,
   !if(!eq(P.NumSrcArgs, 3),
     !if(P.HasModifiers,
-        [(set P.DstVT:$dst,
+        [(set P.DstVT:$vdst,
            (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
                             i1:$clamp, i32:$omod)),
                  (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
                  (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))],
-        [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1,
+        [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1,
                                   P.Src2VT:$src2))]),
     !if(!eq(P.NumSrcArgs, 2),
       !if(P.HasModifiers,
-          [(set P.DstVT:$dst,
+          [(set P.DstVT:$vdst,
              (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
                               i1:$clamp, i32:$omod)),
                    (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-          [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))])
+          [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))])
       /* P.NumSrcArgs == 1 */,
       !if(P.HasModifiers,
-          [(set P.DstVT:$dst,
+          [(set P.DstVT:$vdst,
              (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                               i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
-          [(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
+          [(set P.DstVT:$vdst, (node P.Src0VT:$src0))]))),
   P.NumSrcArgs, P.HasModifiers
 >;
@@ -1950,14 +1954,14 @@
                      VOPProfile P, SDPatternOperator node = null_frag> : VOP3_Helper <
   op, opName,
-  (outs P.DstRC.RegClass:$dst),
+  (outs P.DstRC.RegClass:$vdst),
   (ins InputModsNoDefault:$src0_modifiers, P.Src0RC64:$src0,
        InputModsNoDefault:$src1_modifiers, P.Src1RC64:$src1,
        InputModsNoDefault:$src2_modifiers, P.Src2RC64:$src2,
       ClampMod:$clamp, omod:$omod),
-  "$dst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
-  [(set P.DstVT:$dst,
+  "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
+  [(set P.DstVT:$vdst,
       (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
                                  i1:$clamp, i32:$omod)),
             (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
Index: lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- lib/Target/AMDGPU/SIInstructions.td
+++ lib/Target/AMDGPU/SIInstructions.td
@@ -1501,7 +1501,7 @@
 defm V_OR_B32 : VOP2Inst <vop2<0x1c, 0x14>, "v_or_b32", VOP_I32_I32_I32>;
 defm V_XOR_B32 : VOP2Inst <vop2<0x1d, 0x15>, "v_xor_b32", VOP_I32_I32_I32>;
 
-let Constraints = "$dst = $src2", DisableEncoding="$src2",
+let Constraints = "$vdst = $src2", DisableEncoding="$src2",
     isConvertibleToThreeAddress = 1 in {
 defm V_MAC_F32 : VOP2Inst <vop2<0x1f, 0x16>, "v_mac_f32", VOP_MAC>;
 }
@@ -1848,14 +1848,14 @@
 let isCodeGenOnly = 1, isPseudo = 1 in {
 
 // For use in patterns
-def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$dst),
+def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
   (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []
 >;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
 // 64-bit vector move instruction. This is mainly used by the SIFoldOperands
 // pass to enable folding of inline immediates.
-def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$dst), (ins VSrc_64:$src0), "", []>;
+def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$vdst), (ins VSrc_64:$src0), "", []>;
 } // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
 let hasSideEffects = 1, SALU = 1 in {
Index: lib/Target/AMDGPU/SIShrinkInstructions.cpp
===================================================================
--- lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -287,9 +287,9 @@
   MachineInstrBuilder Inst32 =
       BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
 
-  // Add the dst operand if the 32-bit encoding also has an explicit $dst.
+  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
   // For VOPC instructions, this is replaced by an implicit def of vcc.
-  int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::dst);
+  int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
  if (Op32DstIdx != -1) {
    // dst
    Inst32.addOperand(MI.getOperand(0));
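
Note (illustrative, not part of the patch): the rename is visible to any pass that
looks up the VALU destination by name. Since the OpName enum is generated from the
operand names in the .td files, code that still queries a VALU opcode with
AMDGPU::OpName::dst would, assuming some other instruction class still defines a
$dst operand so the enum value survives, get -1 back at run time rather than a
compile-time error. A minimal C++ sketch of the lookup pattern this patch
migrates, assuming a MachineInstr &MI known to be a VALU instruction (DstIdx and
Dst are hypothetical local names):

  // getNamedOperandIdx() returns -1 when the opcode has no operand with the
  // given name, so a stale OpName::dst query fails the `!= -1` check below.
  int DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
  if (DstIdx != -1) {
    const MachineOperand &Dst = MI.getOperand(DstIdx);
    // ... inspect or rewrite the destination operand here ...
  }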