Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1684,6 +1684,8 @@
   case TargetOpcode::G_SEXT:
   case TargetOpcode::G_ZEXT:
   case TargetOpcode::G_ANYEXT:
+    if (selectImpl(I, *CoverageInfo))
+      return true;
     return selectG_SZA_EXT(I);
   case TargetOpcode::G_BRCOND:
     return selectG_BRCOND(I);
Index: llvm/lib/Target/AMDGPU/VOP2Instructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -729,14 +729,14 @@

 def : GCNPat<
   (i32 (zext (op i16:$src0, i16:$src1))),
-  (inst $src0, $src1)
+  (inst VSrc_b16:$src0, VSrc_b16:$src1)
 >;

 def : GCNPat<
   (i64 (zext (op i16:$src0, i16:$src1))),
-  (REG_SEQUENCE VReg_64,
-    (inst $src0, $src1), sub0,
-    (V_MOV_B32_e32 (i32 0)), sub1)
+  (REG_SEQUENCE VReg_64,
+    (inst VSrc_b16:$src0, VSrc_b16:$src1), sub0,
+    (V_MOV_B32_e32 (i32 0)), sub1)
 >;
 }

@@ -771,7 +771,7 @@

 // TODO: Also do for 64-bit.
 def : GCNPat<
   (add i16:$src0, (i16 NegSubInlineConst16:$src1)),
-  (V_SUB_U16_e64 $src0, NegSubInlineConst16:$src1)
+  (V_SUB_U16_e64 VSrc_b16:$src0, NegSubInlineConst16:$src1)
 >;

@@ -779,7 +779,7 @@

 def : GCNPat<
   (i32 (zext (add i16:$src0, (i16 NegSubInlineConst16:$src1)))),
-  (V_SUB_U16_e64 $src0, NegSubInlineConst16:$src1)
+  (V_SUB_U16_e64 VSrc_b16:$src0, NegSubInlineConst16:$src1)
 >;

 defm : Arithmetic_i16_0Hi_Pats<add, V_ADD_U16_e64>;
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-add.s16.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-add.s16.mir
@@ -0,0 +1,132 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX6 %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX6 %s
+# RUN: llc -march=amdgcn -mcpu=gfx1010 -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX10 %s
+
+# Note: 16-bit instructions generally produce a 0 result in the high 16 bits on GFX8 and GFX9, and preserve the high 16 bits on GFX10+.
+
+---
+name: add_s16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; GFX6-LABEL: name: add_s16
+    ; GFX6: liveins: $vgpr0, $vgpr1
+    ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GFX6: [[V_ADD_U16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U16_e64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_U16_e64_]]
+    ; GFX10-LABEL: name: add_s16
+    ; GFX10: liveins: $vgpr0, $vgpr1
+    ; GFX10: $vcc_hi = IMPLICIT_DEF
+    ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GFX10: [[V_ADD_U16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U16_e64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ADD_U16_e64_]]
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s32) = COPY $vgpr1
+    %2:vgpr(s16) = G_TRUNC %0
+    %3:vgpr(s16) = G_TRUNC %1
+    %4:vgpr(s16) = G_ADD %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: add_s16_zext_to_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; GFX6-LABEL: name: add_s16_zext_to_s32
+    ; GFX6: liveins: $vgpr0, $vgpr1
+    ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GFX6: [[V_ADD_U16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U16_e64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_U16_e64_]]
+    ; GFX10-LABEL: name: add_s16_zext_to_s32
+    ; GFX10: liveins: $vgpr0, $vgpr1
+    ; GFX10: $vcc_hi = IMPLICIT_DEF
+    ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GFX10: [[V_ADD_U16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U16_e64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_ADD_U16_e64_]], 0, 16, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s32) = COPY $vgpr1
+    %2:vgpr(s16) = G_TRUNC %0
+    %3:vgpr(s16) = G_TRUNC %1
+    %4:vgpr(s16) = G_ADD %2, %3
+    %5:vgpr(s32) = G_ZEXT %4
+    S_ENDPGM 0, implicit %5
+
+...
+
+---
+name: add_s16_neg_inline_const_64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GFX6-LABEL: name: add_s16_neg_inline_const_64
+    ; GFX6: liveins: $vgpr0
+    ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX6: [[V_SUB_U16_e64_:%[0-9]+]]:vgpr_32 = V_SUB_U16_e64 [[COPY]], 64, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_SUB_U16_e64_]]
+    ; GFX10-LABEL: name: add_s16_neg_inline_const_64
+    ; GFX10: liveins: $vgpr0
+    ; GFX10: $vcc_hi = IMPLICIT_DEF
+    ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX10: [[V_SUB_U16_e64_:%[0-9]+]]:vgpr_32 = V_SUB_U16_e64 [[COPY]], 64, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_SUB_U16_e64_]]
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s16) = G_TRUNC %0
+    %2:vgpr(s16) = G_CONSTANT i16 -64
+    %3:vgpr(s16) = G_ADD %1, %2
+    S_ENDPGM 0, implicit %3
+
+...
+
+---
+name: add_s16_neg_inline_const_64_zext_to_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GFX6-LABEL: name: add_s16_neg_inline_const_64_zext_to_s32
+    ; GFX6: liveins: $vgpr0
+    ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX6: [[V_SUB_U16_e64_:%[0-9]+]]:vgpr_32 = V_SUB_U16_e64 [[COPY]], 64, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_SUB_U16_e64_]]
+    ; GFX10-LABEL: name: add_s16_neg_inline_const_64_zext_to_s32
+    ; GFX10: liveins: $vgpr0
+    ; GFX10: $vcc_hi = IMPLICIT_DEF
+    ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GFX10: [[V_SUB_U16_e64_:%[0-9]+]]:vgpr_32 = V_SUB_U16_e64 [[COPY]], 64, implicit $exec
+    ; GFX10: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_SUB_U16_e64_]], 0, 16, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s16) = G_TRUNC %0
+    %2:vgpr(s16) = G_CONSTANT i16 -64
+    %3:vgpr(s16) = G_ADD %1, %2
+    %4:vgpr(s32) = G_ZEXT %3
+    S_ENDPGM 0, implicit %4
+
+...
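For context, here is a minimal IR-level sketch of the zext-of-16-bit-add shape that the zext_16bit patterns and the add_s16_zext_to_s32 test above exercise. This is illustrative only and not part of the patch; the function name and file name are made up. Per the note in the test, GFX8/GFX9 already zero the high 16 bits of the V_ADD_U16 result, so the add alone suffices there, while GFX10 preserves the high bits and needs the extra V_BFE_U32:

; Hypothetical input, e.g. compiled with:
;   llc -march=amdgcn -mcpu=gfx900 -global-isel < zext_add.ll
define i32 @zext_add_i16(i16 %a, i16 %b) {
  ; 16-bit add whose result is zero-extended to 32 bits
  %sum = add i16 %a, %b
  %ext = zext i16 %sum to i32
  ret i32 %ext
}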
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.s16.mir
@@ -168,14 +168,12 @@
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX8: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX8: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_ASHRREV_I16_e64_]], 0, 16, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    ; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     ; GFX9-LABEL: name: ashr_s16_s16_vv_zext_to_s32
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I16_e64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX9: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_ASHRREV_I16_e64_]], 0, 16, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    ; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I16_e64_]]
     ; GFX10-LABEL: name: ashr_s16_s16_vv_zext_to_s32
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -202,21 +200,19 @@
     liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: ashr_s16_vv_zext_to_s64
-    ; GFX8: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; GFX8: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; GFX8: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX8: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
-    ; GFX8: [[ASHR:%[0-9]+]]:vgpr(s16) = G_ASHR [[TRUNC]], [[TRUNC1]](s16)
-    ; GFX8: [[ZEXT:%[0-9]+]]:vgpr(s64) = G_ZEXT [[ASHR]](s16)
-    ; GFX8: S_ENDPGM 0, implicit [[ZEXT]](s64)
+    ; GFX8: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+    ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32(s16) = V_MOV_B32_e32 0, implicit $exec
+    ; GFX8: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32(s16) = V_ASHRREV_I16_e64 [[COPY1]](s32), [[COPY]](s32), implicit $exec
+    ; GFX8: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[V_ASHRREV_I16_e64_]](s16), %subreg.sub0, [[V_MOV_B32_e32_]](s16), %subreg.sub1
+    ; GFX8: S_ENDPGM 0, implicit [[REG_SEQUENCE]](s64)
     ; GFX9-LABEL: name: ashr_s16_vv_zext_to_s64
-    ; GFX9: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; GFX9: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; GFX9: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX9: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
-    ; GFX9: [[ASHR:%[0-9]+]]:vgpr(s16) = G_ASHR [[TRUNC]], [[TRUNC1]](s16)
-    ; GFX9: [[ZEXT:%[0-9]+]]:vgpr(s64) = G_ZEXT [[ASHR]](s16)
-    ; GFX9: S_ENDPGM 0, implicit [[ZEXT]](s64)
+    ; GFX9: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32(s16) = V_MOV_B32_e32 0, implicit $exec
+    ; GFX9: [[V_ASHRREV_I16_e64_:%[0-9]+]]:vgpr_32(s16) = V_ASHRREV_I16_e64 [[COPY1]](s32), [[COPY]](s32), implicit $exec
+    ; GFX9: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[V_ASHRREV_I16_e64_]](s16), %subreg.sub0, [[V_MOV_B32_e32_]](s16), %subreg.sub1
+    ; GFX9: S_ENDPGM 0, implicit [[REG_SEQUENCE]](s64)
     ; GFX10-LABEL: name: ashr_s16_vv_zext_to_s64
     ; GFX10: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.s16.mir
@@ -168,14 +168,12 @@
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX8: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX8: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_LSHRREV_B16_e64_]], 0, 16, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    ; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
     ; GFX9-LABEL: name: lshr_s16_s16_vv_zext_to_s32
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX9: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_LSHRREV_B16_e64_]], 0, 16, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    ; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B16_e64_]]
     ; GFX10-LABEL: name: lshr_s16_s16_vv_zext_to_s32
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -202,21 +200,19 @@
     liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: lshr_s16_vv_zext_to_s64
-    ; GFX8: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; GFX8: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; GFX8: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX8: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
-    ; GFX8: [[LSHR:%[0-9]+]]:vgpr(s16) = G_LSHR [[TRUNC]], [[TRUNC1]](s16)
-    ; GFX8: [[ZEXT:%[0-9]+]]:vgpr(s64) = G_ZEXT [[LSHR]](s16)
-    ; GFX8: S_ENDPGM 0, implicit [[ZEXT]](s64)
+    ; GFX8: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+    ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32(s16) = V_MOV_B32_e32 0, implicit $exec
+    ; GFX8: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32(s16) = V_LSHRREV_B16_e64 [[COPY1]](s32), [[COPY]](s32), implicit $exec
+    ; GFX8: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[V_LSHRREV_B16_e64_]](s16), %subreg.sub0, [[V_MOV_B32_e32_]](s16), %subreg.sub1
+    ; GFX8: S_ENDPGM 0, implicit [[REG_SEQUENCE]](s64)
     ; GFX9-LABEL: name: lshr_s16_vv_zext_to_s64
-    ; GFX9: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; GFX9: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; GFX9: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX9: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
-    ; GFX9: [[LSHR:%[0-9]+]]:vgpr(s16) = G_LSHR [[TRUNC]], [[TRUNC1]](s16)
-    ; GFX9: [[ZEXT:%[0-9]+]]:vgpr(s64) = G_ZEXT [[LSHR]](s16)
-    ; GFX9: S_ENDPGM 0, implicit [[ZEXT]](s64)
+    ; GFX9: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32(s16) = V_MOV_B32_e32 0, implicit $exec
+    ; GFX9: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32(s16) = V_LSHRREV_B16_e64 [[COPY1]](s32), [[COPY]](s32), implicit $exec
+    ; GFX9: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[V_LSHRREV_B16_e64_]](s16), %subreg.sub0, [[V_MOV_B32_e32_]](s16), %subreg.sub1
+    ; GFX9: S_ENDPGM 0, implicit [[REG_SEQUENCE]](s64)
     ; GFX10-LABEL: name: lshr_s16_vv_zext_to_s64
     ; GFX10: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.s16.mir
@@ -168,14 +168,12 @@
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX8: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX8: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_LSHLREV_B16_e64_]], 0, 16, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    ; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]]
     ; GFX9-LABEL: name: shl_s16_s16_vv_zext_to_s32
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B16_e64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX9: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[V_LSHLREV_B16_e64_]], 0, 16, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_BFE_U32_]]
+    ; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B16_e64_]]
     ; GFX10-LABEL: name: shl_s16_s16_vv_zext_to_s32
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -202,21 +200,19 @@
     liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: shl_s16_vv_zext_to_s64
-    ; GFX8: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; GFX8: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; GFX8: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX8: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
-    ; GFX8: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
-    ; GFX8: [[ZEXT:%[0-9]+]]:vgpr(s64) = G_ZEXT [[SHL]](s16)
-    ; GFX8: S_ENDPGM 0, implicit [[ZEXT]](s64)
+    ; GFX8: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+    ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+    ; GFX8: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32(s16) = V_MOV_B32_e32 0, implicit $exec
+    ; GFX8: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32(s16) = V_LSHLREV_B16_e64 [[COPY1]](s32), [[COPY]](s32), implicit $exec
+    ; GFX8: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[V_LSHLREV_B16_e64_]](s16), %subreg.sub0, [[V_MOV_B32_e32_]](s16), %subreg.sub1
+    ; GFX8: S_ENDPGM 0, implicit [[REG_SEQUENCE]](s64)
     ; GFX9-LABEL: name: shl_s16_vv_zext_to_s64
-    ; GFX9: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; GFX9: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
-    ; GFX9: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX9: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
-    ; GFX9: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
-    ; GFX9: [[ZEXT:%[0-9]+]]:vgpr(s64) = G_ZEXT [[SHL]](s16)
-    ; GFX9: S_ENDPGM 0, implicit [[ZEXT]](s64)
+    ; GFX9: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32(s16) = V_MOV_B32_e32 0, implicit $exec
+    ; GFX9: [[V_LSHLREV_B16_e64_:%[0-9]+]]:vgpr_32(s16) = V_LSHLREV_B16_e64 [[COPY1]](s32), [[COPY]](s32), implicit $exec
+    ; GFX9: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[V_LSHLREV_B16_e64_]](s16), %subreg.sub0, [[V_MOV_B32_e32_]](s16), %subreg.sub1
+    ; GFX9: S_ENDPGM 0, implicit [[REG_SEQUENCE]](s64)
     ; GFX10-LABEL: name: shl_s16_vv_zext_to_s64
     ; GFX10: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
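Similarly, the zext_to_s64 cases updated above correspond to IR like the following sketch (hypothetical, not part of the patch): a 16-bit shift whose result is zero-extended to 64 bits. Per the new GFX8/GFX9 checks, this now selects to the 16-bit VALU shift plus a REG_SEQUENCE whose high half is a V_MOV_B32 of 0, where previously the generic G_ZEXT was left unselected:

define i64 @zext_shl_i16_to_i64(i16 %a, i16 %b) {
  ; 16-bit shift whose result is zero-extended to 64 bits
  %shifted = shl i16 %a, %b
  %ext = zext i16 %shifted to i64
  ret i64 %ext
}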