Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2423,6 +2423,17 @@
     ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank);
     MachineIRBuilder B(MI, ApplySALU);
 
+    if (DstTy.isVector() && Opc == AMDGPU::G_ABS) {
+      Register WideSrcLo, WideSrcHi;
+
+      std::tie(WideSrcLo, WideSrcHi) =
+          unpackV2S16ToS32(B, MI.getOperand(1).getReg(), TargetOpcode::G_SEXT);
+      auto Lo = B.buildInstr(AMDGPU::G_ABS, {S32}, {WideSrcLo});
+      auto Hi = B.buildInstr(AMDGPU::G_ABS, {S32}, {WideSrcHi});
+      B.buildBuildVectorTrunc(DstReg, {Lo.getReg(0), Hi.getReg(0)});
+      MI.eraseFromParent();
+      return;
+    }
     if (DstTy.isVector()) {
       Register WideSrc0Lo, WideSrc0Hi;
       Register WideSrc1Lo, WideSrc1Hi;
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.abs.vector.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.abs.vector.ll
@@ -0,0 +1,306 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -o - < %s | FileCheck %s --check-prefix=GFX8
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -o - < %s | FileCheck %s --check-prefix=GFX10
+
+declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1)
+declare <3 x i8> @llvm.abs.v3i8(<3 x i8>, i1)
+declare <2 x i16> @llvm.abs.v2i16(<2 x i16>, i1)
+declare <3 x i16> @llvm.abs.v3i16(<3 x i16>, i1)
+
+define amdgpu_cs void @abs_sgpr_v2i8(ptr addrspace(1) %ptr, <2 x i8> inreg %arg) {
+; GFX8-LABEL: abs_sgpr_v2i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 8
+; GFX8-NEXT:    s_lshl_b32 s0, s0, 8
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_ashr_i32 s1, s1, 8
+; GFX8-NEXT:    s_ashr_i32 s0, s0, 8
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_abs_i32 s1, s1
+; GFX8-NEXT:    s_abs_i32 s0, s0
+; GFX8-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 8
+; GFX8-NEXT:    s_or_b32 s0, s0, s1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_sgpr_v2i8:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_sext_i32_i8 s1, s1
+; GFX10-NEXT:    s_sext_i32_i8 s0, s0
+; GFX10-NEXT:    s_sext_i32_i16 s1, s1
+; GFX10-NEXT:    s_sext_i32_i16 s0, s0
+; GFX10-NEXT:    s_abs_i32 s1, s1
+; GFX10-NEXT:    s_abs_i32 s0, s0
+; GFX10-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX10-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX10-NEXT:    s_lshl_b32 s1, s1, 8
+; GFX10-NEXT:    s_or_b32 s0, s0, s1
+; GFX10-NEXT:    v_mov_b32_e32 v2, s0
+; GFX10-NEXT:    global_store_short v[0:1], v2, off
+; GFX10-NEXT:    s_endpgm
+  %res = call <2 x i8> @llvm.abs.v2i8(<2 x i8> %arg, i1 false)
+  store <2 x i8> %res, ptr addrspace(1) %ptr
+  ret void
+}
+
+define amdgpu_cs void @abs_vgpr_v2i8(ptr addrspace(1) %ptr, <2 x i8> %arg) {
+; GFX8-LABEL: abs_vgpr_v2i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT:    v_ashrrev_i16_e32 v2, 8, v2
+; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT:    v_sub_u16_e32 v4, 0, v2
+; GFX8-NEXT:    v_ashrrev_i16_e32 v3, 8, v3
+; GFX8-NEXT:    v_max_i16_e32 v2, v2, v4
+; GFX8-NEXT:    v_sub_u16_e32 v4, 0, v3
+; GFX8-NEXT:    v_max_i16_e32 v3, v3, v4
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT:    v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_vgpr_v2i8:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT:    s_movk_i32 s0, 0xff
+; GFX10-NEXT:    v_sub_nc_u16 v4, 0, v3
+; GFX10-NEXT:    v_sub_nc_u16 v5, 0, v2
+; GFX10-NEXT:    v_max_i16 v3, v3, v4
+; GFX10-NEXT:    v_max_i16 v2, v2, v5
+; GFX10-NEXT:    v_and_b32_sdwa v3, v3, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-NEXT:    global_store_short v[0:1], v2, off
+; GFX10-NEXT:    s_endpgm
+  %res = call <2 x i8> @llvm.abs.v2i8(<2 x i8> %arg, i1 false)
+  store <2 x i8> %res, ptr addrspace(1) %ptr
+  ret void
+}
+
+define amdgpu_cs <3 x i8> @abs_sgpr_v3i8(<3 x i8> inreg %arg) {
+; GFX8-LABEL: abs_sgpr_v3i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshl_b32 s0, s0, 8
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 8
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 8
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s2, s2
+; GFX8-NEXT:    s_ashr_i32 s0, s0, 8
+; GFX8-NEXT:    s_ashr_i32 s1, s1, 8
+; GFX8-NEXT:    s_ashr_i32 s2, s2, 8
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s2, s2
+; GFX8-NEXT:    s_abs_i32 s0, s0
+; GFX8-NEXT:    s_abs_i32 s1, s1
+; GFX8-NEXT:    s_abs_i32 s2, s2
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX10-LABEL: abs_sgpr_v3i8:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_sext_i32_i8 s0, s0
+; GFX10-NEXT:    s_sext_i32_i8 s1, s1
+; GFX10-NEXT:    s_sext_i32_i8 s2, s2
+; GFX10-NEXT:    s_sext_i32_i16 s0, s0
+; GFX10-NEXT:    s_sext_i32_i16 s1, s1
+; GFX10-NEXT:    s_sext_i32_i16 s2, s2
+; GFX10-NEXT:    s_abs_i32 s0, s0
+; GFX10-NEXT:    s_abs_i32 s1, s1
+; GFX10-NEXT:    s_abs_i32 s2, s2
+; GFX10-NEXT:    ; return to shader part epilog
+  %res = call <3 x i8> @llvm.abs.v3i8(<3 x i8> %arg, i1 false)
+  ret <3 x i8> %res
+}
+
+define amdgpu_cs void @abs_vgpr_v3i8(ptr addrspace(1) %ptr, <3 x i8>  %arg) {
+; GFX8-LABEL: abs_vgpr_v3i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT:    v_ashrrev_i16_e32 v2, 8, v2
+; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v2
+; GFX8-NEXT:    v_ashrrev_i16_e32 v3, 8, v3
+; GFX8-NEXT:    v_max_i16_e32 v2, v2, v5
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v3
+; GFX8-NEXT:    v_max_i16_e32 v3, v3, v5
+; GFX8-NEXT:    v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT:    v_ashrrev_i16_e32 v4, 8, v4
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v4
+; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT:    v_max_i16_e32 v4, v4, v5
+; GFX8-NEXT:    v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0xffff
+; GFX8-NEXT:    v_and_b32_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT:    v_or_b32_sdwa v4, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 2, v0
+; GFX8-NEXT:    v_lshrrev_b32_e32 v5, 16, v4
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_store_short v[0:1], v4
+; GFX8-NEXT:    flat_store_byte v[2:3], v5
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_vgpr_v3i8:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT:    v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT:    v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT:    s_movk_i32 s0, 0xff
+; GFX10-NEXT:    v_sub_nc_u16 v5, 0, v3
+; GFX10-NEXT:    v_sub_nc_u16 v6, 0, v2
+; GFX10-NEXT:    v_max_i16 v3, v3, v5
+; GFX10-NEXT:    v_sub_nc_u16 v5, 0, v4
+; GFX10-NEXT:    v_max_i16 v2, v2, v6
+; GFX10-NEXT:    v_mov_b32_e32 v6, 0xffff
+; GFX10-NEXT:    v_and_b32_sdwa v3, v3, s0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_max_i16 v4, v4, v5
+; GFX10-NEXT:    v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-NEXT:    v_and_b32_sdwa v3, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX10-NEXT:    v_lshl_or_b32 v2, v3, 16, v2
+; GFX10-NEXT:    global_store_short v[0:1], v2, off
+; GFX10-NEXT:    global_store_byte_d16_hi v[0:1], v2, off offset:2
+; GFX10-NEXT:    s_endpgm
+  %res = call <3 x i8> @llvm.abs.v3i8(<3 x i8> %arg, i1 false)
+  store <3 x i8> %res, ptr addrspace(1) %ptr
+  ret void
+}
+
+define amdgpu_cs void @abs_sgpr_v2i16(ptr addrspace(1) %ptr, <2 x i16> inreg %arg) {
+; GFX8-LABEL: abs_sgpr_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_abs_i32 s1, s1
+; GFX8-NEXT:    s_abs_i32 s0, s0
+; GFX8-NEXT:    s_and_b32 s1, 0xffff, s1
+; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX8-NEXT:    s_or_b32 s0, s0, s1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_sgpr_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_sext_i32_i16 s1, s0
+; GFX10-NEXT:    s_ashr_i32 s0, s0, 16
+; GFX10-NEXT:    s_abs_i32 s1, s1
+; GFX10-NEXT:    s_abs_i32 s0, s0
+; GFX10-NEXT:    s_pack_ll_b32_b16 s0, s1, s0
+; GFX10-NEXT:    v_mov_b32_e32 v2, s0
+; GFX10-NEXT:    global_store_dword v[0:1], v2, off
+; GFX10-NEXT:    s_endpgm
+  %res = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %arg, i1 false)
+  store <2 x i16> %res, ptr addrspace(1) %ptr
+  ret void
+}
+
+define amdgpu_cs void @abs_vgpr_v2i16(ptr addrspace(1) %ptr, <2 x i16> %arg) {
+; GFX8-LABEL: abs_vgpr_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; GFX8-NEXT:    v_sub_u16_e32 v4, 0, v2
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v3
+; GFX8-NEXT:    v_max_i16_e32 v2, v2, v4
+; GFX8-NEXT:    v_max_i16_sdwa v3, v3, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_vgpr_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    v_pk_sub_i16 v3, 0, v2
+; GFX10-NEXT:    v_pk_max_i16 v2, v2, v3
+; GFX10-NEXT:    global_store_dword v[0:1], v2, off
+; GFX10-NEXT:    s_endpgm
+  %res = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %arg, i1 false)
+  store <2 x i16> %res, ptr addrspace(1) %ptr
+  ret void
+}
+
+define amdgpu_cs void @abs_sgpr_v3i16(ptr addrspace(1) %ptr, <3 x i16> inreg %arg) {
+; GFX8-LABEL: abs_sgpr_v3i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_abs_i32 s0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8-NEXT:    s_sext_i32_i16 s2, s2
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 2, v0
+; GFX8-NEXT:    s_abs_i32 s2, s2
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX8-NEXT:    s_abs_i32 s1, s1
+; GFX8-NEXT:    v_mov_b32_e32 v4, s2
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 4, v0
+; GFX8-NEXT:    flat_store_short v[2:3], v4
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_sgpr_v3i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_sext_i32_i16 s2, s0
+; GFX10-NEXT:    s_ashr_i32 s0, s0, 16
+; GFX10-NEXT:    s_abs_i32 s2, s2
+; GFX10-NEXT:    s_abs_i32 s0, s0
+; GFX10-NEXT:    s_sext_i32_i16 s1, s1
+; GFX10-NEXT:    s_pack_ll_b32_b16 s0, s2, s0
+; GFX10-NEXT:    s_abs_i32 s1, s1
+; GFX10-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX10-NEXT:    v_mov_b32_e32 v2, s0
+; GFX10-NEXT:    v_mov_b32_e32 v3, s2
+; GFX10-NEXT:    v_mov_b32_e32 v4, s1
+; GFX10-NEXT:    global_store_short v[0:1], v2, off
+; GFX10-NEXT:    global_store_short v[0:1], v3, off offset:2
+; GFX10-NEXT:    global_store_short v[0:1], v4, off offset:4
+; GFX10-NEXT:    s_endpgm
+  %res = call <3 x i16> @llvm.abs.v3i16(<3 x i16> %arg, i1 false)
+  store <3 x i16> %res, ptr addrspace(1) %ptr
+  ret void
+}
+
+define amdgpu_cs void @abs_vgpr_v3i16(ptr addrspace(1) %ptr, <3 x i16> %arg) {
+; GFX8-LABEL: abs_vgpr_v3i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v2
+; GFX8-NEXT:    v_max_i16_e32 v2, v2, v5
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v4
+; GFX8-NEXT:    v_max_i16_e32 v4, v4, v5
+; GFX8-NEXT:    v_sub_u16_e32 v5, 0, v3
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 2, v0
+; GFX8-NEXT:    v_max_i16_e32 v5, v3, v5
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 4, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_store_short v[2:3], v4
+; GFX8-NEXT:    flat_store_short v[0:1], v5
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: abs_vgpr_v3i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    v_pk_sub_i16 v4, 0, v2
+; GFX10-NEXT:    v_sub_nc_u16 v5, 0, v3
+; GFX10-NEXT:    v_pk_max_i16 v2, v2, v4
+; GFX10-NEXT:    v_max_i16 v3, v3, v5
+; GFX10-NEXT:    global_store_short v[0:1], v2, off
+; GFX10-NEXT:    global_store_short_d16_hi v[0:1], v2, off offset:2
+; GFX10-NEXT:    global_store_short v[0:1], v3, off offset:4
+; GFX10-NEXT:    s_endpgm
+  %res = call <3 x i16> @llvm.abs.v3i16(<3 x i16> %arg, i1 false)
+  store <3 x i16> %res, ptr addrspace(1) %ptr
+  ret void
+}
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-abs.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-abs.mir
@@ -0,0 +1,87 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-regbankselect %s -verify-machineinstrs -o - | FileCheck %s
+
+---
+name: abs_sgpr_s16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+
+    ; CHECK-LABEL: name: abs_sgpr_s16
+    ; CHECK: liveins: $sgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s16)
+    ; CHECK-NEXT: [[ABS:%[0-9]+]]:sgpr(s32) = G_ABS [[SEXT]]
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ABS]](s32)
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s16) = G_TRUNC %1
+    %5:_(s16) = G_ABS %2
+...
+
+---
+name: abs_vgpr_s16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: abs_vgpr_s16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s16) = G_CONSTANT i16 0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[C]], [[TRUNC]]
+    ; CHECK-NEXT: [[SMAX:%[0-9]+]]:vgpr(s16) = G_SMAX [[TRUNC]], [[SUB]]
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s16) = G_TRUNC %1
+    %5:_(s16) = G_ABS %2
+...
+
+---
+name: abs_sgpr_v2i16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0
+
+    ; CHECK-LABEL: name: abs_sgpr_v2i16
+    ; CHECK: liveins: $sgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>)
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:sgpr(s32) = G_SEXT_INREG [[BITCAST]], 16
+    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:sgpr(s32) = G_ASHR [[BITCAST]], [[C]](s32)
+    ; CHECK-NEXT: [[ABS:%[0-9]+]]:sgpr(s32) = G_ABS [[SEXT_INREG]]
+    ; CHECK-NEXT: [[ABS1:%[0-9]+]]:sgpr(s32) = G_ABS [[ASHR]]
+    ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ABS]](s32), [[ABS1]](s32)
+    %1:_(<2 x s16>) = COPY $sgpr0
+    %5:_(<2 x s16>) = G_ABS %1
+...
+
+---
+name: abs_vgpr_v2i16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: abs_vgpr_v2i16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s16) = G_CONSTANT i16 0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:vgpr(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[BUILD_VECTOR]], [[COPY]]
+    ; CHECK-NEXT: [[SMAX:%[0-9]+]]:vgpr(<2 x s16>) = G_SMAX [[COPY]], [[SUB]]
+    %1:_(<2 x s16>) = COPY $vgpr0
+    %5:_(<2 x s16>) = G_ABS %1
+...