Index: lib/Target/AMDGPU/VOP1Instructions.td
===================================================================
--- lib/Target/AMDGPU/VOP1Instructions.td
+++ lib/Target/AMDGPU/VOP1Instructions.td
@@ -107,6 +107,13 @@
   def _sdwa : VOP1_SDWA_Pseudo <opName, P>;
   foreach _ = BoolToList<P.HasExtDPP>.ret in
     def _dpp : VOP1_DPP_Pseudo <opName, P>;
+
+  def : MnemonicAlias<opName#"_e32", opName>, LetDummies;
+  def : MnemonicAlias<opName#"_e64", opName>, LetDummies;
+  def : MnemonicAlias<opName#"_sdwa", opName>, LetDummies;
+
+  foreach _ = BoolToList<P.HasExtDPP>.ret in
+    def : MnemonicAlias<opName#"_dpp", opName>, LetDummies;
 }
 
 // Special profile for instructions which have clamp
Index: lib/Target/AMDGPU/VOP2Instructions.td
===================================================================
--- lib/Target/AMDGPU/VOP2Instructions.td
+++ lib/Target/AMDGPU/VOP2Instructions.td
@@ -539,9 +539,9 @@
 let SubtargetPredicate = isGFX6GFX7GFX10 in {
 let isCommutable = 1 in {
 defm V_MAC_LEGACY_F32 : VOP2Inst <"v_mac_legacy_f32", VOP_F32_F32_F32>;
-defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_I32_I32_I32>;
-defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_I32_I32_I32>;
-defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32>;
+defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_I32_I32_I32, srl>;
+defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_I32_I32_I32, sra>;
+defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32, shl>;
 } // End isCommutable = 1
 } // End SubtargetPredicate = isGFX6GFX7GFX10
Index: lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- lib/Target/AMDGPU/VOP3Instructions.td
+++ lib/Target/AMDGPU/VOP3Instructions.td
@@ -385,12 +385,12 @@
 }
 
 let SchedRW = [Write64Bit] in {
-let SubtargetPredicate = isGFX6GFX7GFX10, Predicates = [isGFX6GFX7GFX10] in {
-def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, shl>;
-def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, srl>;
-def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, sra>;
+let SubtargetPredicate = isGFX6GFX7GFX10 in {
+def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_I64_I64_I32>, shl>;
+def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_I64_I64_I32>, srl>;
+def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, sra>;
 def V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
-} // End SubtargetPredicate = isGFX6GFX7GFX10, Predicates = [isGFX6GFX7GFX10]
+} // End SubtargetPredicate = isGFX6GFX7GFX10
 
 let SubtargetPredicate = isGFX8Plus in {
 def V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshl_rev>;
@@ -399,21 +399,6 @@
 } // End SubtargetPredicate = isGFX8Plus
 } // End SchedRW = [Write64Bit]
 
-let Predicates = [isGFX8Plus] in {
-def : GCNPat <
-  (getDivergentFrag<shl>.ret i64:$x, i32:$y),
-  (V_LSHLREV_B64 $y, $x)
->;
-def : AMDGPUPat <
-  (getDivergentFrag<srl>.ret i64:$x, i32:$y),
-  (V_LSHRREV_B64 $y, $x)
->;
-def : AMDGPUPat <
-  (getDivergentFrag<sra>.ret i64:$x, i32:$y),
-  (V_ASHRREV_I64 $y, $x)
->;
-}
-
 let SchedRW = [Write32Bit] in {
 let SubtargetPredicate = isGFX8Plus in {
Index: lib/Target/AMDGPU/VOPInstructions.td
===================================================================
--- lib/Target/AMDGPU/VOPInstructions.td
+++ lib/Target/AMDGPU/VOPInstructions.td
@@ -14,6 +14,7 @@
   bit isReMaterializable;
   bit isAsCheapAsAMove;
   bit VOPAsmPrefer32Bit;
+  bit FPDPRounding;
   Predicate SubtargetPredicate;
   string Constraints;
   string DisableEncoding;
@@ -41,9 +42,7 @@
                   string asm, list<dag> pattern> :
   InstSI <outs, ins, asm, pattern>,
   VOP <opName>,
-  SIMCInstr <opName#suffix, SIEncodingFamily.NONE>,
-  MnemonicAlias<opName#suffix, opName> {
-
+  SIMCInstr <opName#suffix, SIEncodingFamily.NONE> {
   let isPseudo = 1;
   let isCodeGenOnly = 1;
   let UseNamedOperandTable = 1;
@@ -473,8 +472,7 @@
 class VOP_SDWA_Pseudo <string opName, VOPProfile P, list<dag> pattern=[]> :
   InstSI <P.OutsSDWA, P.InsSDWA, "", pattern>,
   VOP <opName>,
-  SIMCInstr <opName#"_sdwa", SIEncodingFamily.SDWA>,
-  MnemonicAlias <opName#"_sdwa", opName> {
+  SIMCInstr <opName#"_sdwa", SIEncodingFamily.SDWA> {
 
   let isPseudo = 1;
   let isCodeGenOnly = 1;
@@ -595,8 +593,7 @@
 class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
   InstSI <P.OutsDPP, P.InsDPP, OpName#P.AsmDPP, pattern>,
   VOP <OpName>,
-  SIMCInstr <OpName#"_dpp", SIEncodingFamily.NONE>,
-  MnemonicAlias <OpName#"_dpp", OpName> {
+  SIMCInstr <OpName#"_dpp", SIEncodingFamily.NONE> {
 
   let isPseudo = 1;
   let isCodeGenOnly = 1;
Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
+++ test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
@@ -216,13 +216,13 @@
     ; GFX6-LABEL: name: ashr_s64_sv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_sv
     ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_sv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -237,8 +237,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_ASHR %0, %1
@@ -256,13 +256,13 @@
     ; GFX6-LABEL: name: ashr_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX6: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX7: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
@@ -277,8 +277,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX10: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_ASHR %0, %1
@@ -296,13 +296,13 @@
    ; GFX6-LABEL: name: ashr_s64_vv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_vv
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX7: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_vv
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -317,11 +317,10 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_ASHR %0, %1
     S_ENDPGM 0, implicit %2
 ...
-
Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
+++ test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
@@ -216,13 +216,13 @@
    ; GFX6-LABEL: name: lshr_s64_sv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX7-LABEL: name: lshr_s64_sv
     ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX8-LABEL: name: lshr_s64_sv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -237,8 +237,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_LSHR %0, %1
@@ -256,13 +256,13 @@
     ; GFX6-LABEL: name: lshr_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX6: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX7-LABEL: name: lshr_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX7: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX8-LABEL: name: lshr_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
@@ -277,8 +277,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX10: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_LSHR %0, %1
@@ -296,13 +296,13 @@
     ; GFX6-LABEL: name: lshr_s64_vv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX7-LABEL: name: lshr_s64_vv
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX7: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX8-LABEL: name: lshr_s64_vv
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -317,8 +317,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_LSHR %0, %1
Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
+++ test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
@@ -216,13 +216,13 @@
     ; GFX6-LABEL: name: shl_s64_sv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX7-LABEL: name: shl_s64_sv
    ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX8-LABEL: name: shl_s64_sv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -237,8 +237,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_SHL %0, %1
@@ -256,13 +256,13 @@
     ; GFX6-LABEL: name: shl_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX6: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX7-LABEL: name: shl_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX7: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX8-LABEL: name: shl_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
@@ -277,8 +277,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX10: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_SHL %0, %1
@@ -296,13 +296,13 @@
     ; GFX6-LABEL: name: shl_s64_vv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX7-LABEL: name: shl_s64_vv
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX7: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX8-LABEL: name: shl_s64_vv
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
    ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -317,8 +317,8 @@
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_SHL %0, %1
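
Note on the change (a reviewer's sketch, not part of the diff): moving the MnemonicAlias defs off the VOP_*_Pseudo classes and into the VOP1Inst multiclass keeps the suffixed spellings assembling to the base mnemonic; LetDummies resets the surrounding `let` state for the bare alias defs, which is presumably why `bit FPDPRounding` joins LetDummies. For any VOP1 op the aliases behave roughly like:

    v_mov_b32_e32 v0, v1    // alias maps "v_mov_b32_e32" -> "v_mov_b32"; the matcher then picks the encoding

On the codegen side, dropping VOP_PAT_GEN and the hand-written getDivergentFrag patterns lets the ordinary VOP3Inst patterns select the non-REV 64-bit shifts on the subtargets that still have them (GFX6/GFX7/GFX10), as the updated MIR tests check. For a divergent shift this means, with a hypothetical register assignment:

    ; %2:vgpr(s64) = G_SHL %0, %1
    v_lshl_b64 v[0:1], v[0:1], v2    ; rather than v_lshlrev_b64 v[0:1], v2, v[0:1]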