Index: llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -2015,6 +2015,13 @@
     Observer.changedInstr(MI);
     return Legalized;
 
+  case TargetOpcode::G_ABS:
+    Observer.changingInstr(MI);
+    widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
+    widenScalarDst(MI, WideTy);
+    Observer.changedInstr(MI);
+    return Legalized;
+
   case TargetOpcode::G_ADD:
   case TargetOpcode::G_AND:
   case TargetOpcode::G_MUL:
@@ -4122,6 +4129,7 @@
   case G_PTR_ADD:
   case G_SMULH:
   case G_UMULH:
+  case G_ABS:
   case G_FADD:
   case G_FMUL:
   case G_FSUB:
Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -90,6 +90,9 @@
   bool legalizeBuildVector(MachineInstr &MI, MachineRegisterInfo &MRI,
                            MachineIRBuilder &B) const;
 
+  bool legalizeABS(MachineInstr &MI, MachineRegisterInfo &MRI,
+                   MachineIRBuilder &B) const;
+
   bool loadInputValue(Register DstReg, MachineIRBuilder &B,
                       const ArgDescriptor *Arg, const TargetRegisterClass *ArgRC,
                       LLT ArgTy) const;
Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -962,7 +962,7 @@
     .scalarize(0);
 
   if (ST.hasVOP3PInsts()) {
-    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
+    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS})
       .legalFor({S32, S16, V2S16})
       .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
       .clampMaxNumElements(0, S16, 2)
@@ -971,7 +971,7 @@
       .scalarize(0)
       .lower();
   } else {
-    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
+    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS})
       .legalFor({S32, S16})
       .widenScalarToNextPow2(0)
       .minScalar(0, S16)
@@ -990,7 +990,7 @@
     .scalarize(0)
     .lower();
 
-  getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
+  getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS})
     .legalFor({S32})
     .minScalar(0, S32)
     .widenScalarToNextPow2(0)
@@ -1738,6 +1738,8 @@
     return legalizeFFloor(MI, MRI, B);
   case TargetOpcode::G_BUILD_VECTOR:
     return legalizeBuildVector(MI, MRI, B);
+  case TargetOpcode::G_ABS:
+    return legalizeABS(MI, MRI, B);
   default:
     return false;
   }
@@ -2686,6 +2688,21 @@
   return true;
 }
 
+bool AMDGPULegalizerInfo::legalizeABS(MachineInstr &MI,
+                                      MachineRegisterInfo &MRI,
+                                      MachineIRBuilder &B) const {
+  Register SrcReg = MI.getOperand(1).getReg();
+  LLT Ty = MRI.getType(SrcReg);
+
+  // abs x -> max x, (sub 0, x)
+  Register Zero = B.buildConstant(Ty, 0).getReg(0);
+  Register Sub = B.buildSub(Ty, Zero, SrcReg).getReg(0);
+  B.buildSMax(MI.getOperand(0), SrcReg, Sub);
+
+  MI.eraseFromParent();
+  return true;
+}
+
 // Check that this is a G_XOR x, -1
 static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
   if (MI.getOpcode() != TargetOpcode::G_XOR)
Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -73,6 +73,7 @@
 #include "AMDGPU.h"
 #include "AMDGPUGlobalISelUtils.h"
 #include "AMDGPUInstrInfo.h"
+#include "AMDGPULegalizerInfo.h"
 #include "GCNSubtarget.h"
 #include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h" @@ -2324,6 +2325,25 @@ MI.eraseFromParent(); return; } + case AMDGPU::G_ABS: { + Register SrcReg = MI.getOperand(1).getReg(); + const RegisterBank *SrcBank = MRI.getRegBankOrNull(SrcReg); + + // There is no VALU abs instruction so we need to replace it with a sub and + // max combination. + if ((SrcBank && SrcBank == &AMDGPU::VGPRRegBank)) { + MachineFunction *MF = MI.getParent()->getParent(); + ApplyRegBankMapping Apply(*this, MRI, &AMDGPU::VGPRRegBank); + MachineIRBuilder B(MI, Apply); + LegalizerHelper Helper(*MF, Apply, B); + const AMDGPULegalizerInfo &LI = + static_cast(Helper.getLegalizerInfo()); + + if (LI.legalizeCustom(Helper, MI)) + return; + } + LLVM_FALLTHROUGH; + } case AMDGPU::G_ADD: case AMDGPU::G_SUB: case AMDGPU::G_MUL: @@ -3492,6 +3512,7 @@ } case AMDGPU::G_PTR_ADD: case AMDGPU::G_PTRMASK: + case AMDGPU::G_ABS: case AMDGPU::G_ADD: case AMDGPU::G_SUB: case AMDGPU::G_MUL: Index: llvm/lib/Target/AMDGPU/SOPInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/SOPInstructions.td +++ llvm/lib/Target/AMDGPU/SOPInstructions.td @@ -294,7 +294,9 @@ } // End SubtargetPredicate = isGFX6GFX7GFX8GFX9 let Defs = [SCC] in { -def S_ABS_I32 : SOP1_32 <"s_abs_i32">; +def S_ABS_I32 : SOP1_32 <"s_abs_i32", + [(set i32:$sdst, (abs i32:$src0))] + >; } // End Defs = [SCC] let SubtargetPredicate = HasVGPRIndexMode in { Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.abs.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.abs.ll @@ -0,0 +1,154 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -march=amdgcn -mcpu=tahiti -verify-machineinstrs -o - < %s | FileCheck %s --check-prefixes=GFX,GFX6 +; RUN: llc -global-isel -march=amdgcn -mcpu=fiji -verify-machineinstrs -o - < %s | FileCheck %s --check-prefixes=GFX,GFX8 + +declare i16 @llvm.abs.i16(i16, i1) +declare i32 @llvm.abs.i32(i32, i1) +declare i64 @llvm.abs.i64(i64, i1) +declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) + +define amdgpu_cs i16 @abs_sgpr_i16(i16 inreg %arg) { +; GFX-LABEL: abs_sgpr_i16: +; GFX: ; %bb.0: +; GFX-NEXT: s_sext_i32_i16 s0, s0 +; GFX-NEXT: s_abs_i32 s0, s0 +; GFX-NEXT: ; return to shader part epilog + %res = call i16 @llvm.abs.i16(i16 %arg, i1 false) + ret i16 %res +} + +define amdgpu_cs i32 @abs_sgpr_i32(i32 inreg %arg) { +; GFX-LABEL: abs_sgpr_i32: +; GFX: ; %bb.0: +; GFX-NEXT: s_abs_i32 s0, s0 +; GFX-NEXT: ; return to shader part epilog + %res = call i32 @llvm.abs.i32(i32 %arg, i1 false) + ret i32 %res +} + +define amdgpu_cs i64 @abs_sgpr_i64(i64 inreg %arg) { +; GFX-LABEL: abs_sgpr_i64: +; GFX: ; %bb.0: +; GFX-NEXT: s_ashr_i32 s2, s1, 31 +; GFX-NEXT: s_add_u32 s0, s0, s2 +; GFX-NEXT: s_cselect_b32 s4, 1, 0 +; GFX-NEXT: s_and_b32 s4, s4, 1 +; GFX-NEXT: s_cmp_lg_u32 s4, 0 +; GFX-NEXT: s_mov_b32 s3, s2 +; GFX-NEXT: s_addc_u32 s1, s1, s2 +; GFX-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3] +; GFX-NEXT: ; return to shader part epilog + %res = call i64 @llvm.abs.i64(i64 %arg, i1 false) + ret i64 %res +} + +define amdgpu_cs <4 x i32> @abs_sgpr_v4i32(<4 x i32> inreg %arg) { +; GFX-LABEL: abs_sgpr_v4i32: +; GFX: ; %bb.0: +; GFX-NEXT: s_abs_i32 s0, s0 +; GFX-NEXT: s_abs_i32 s1, s1 +; GFX-NEXT: s_abs_i32 s2, s2 +; GFX-NEXT: s_abs_i32 s3, s3 +; GFX-NEXT: ; return to shader part epilog + %res = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %arg, i1 false) + ret <4 x i32> %res +} + +define amdgpu_cs i16 @abs_vgpr_i16(i16 %arg) { 
+; GFX6-LABEL: abs_vgpr_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, 0, v0
+; GFX6-NEXT:    v_max_i32_e32 v0, v0, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: abs_vgpr_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_sub_u16_e32 v1, 0, v0
+; GFX8-NEXT:    v_max_i16_e32 v0, v0, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+  %res = call i16 @llvm.abs.i16(i16 %arg, i1 false)
+  ret i16 %res
+}
+
+define amdgpu_cs i32 @abs_vgpr_i32(i32 %arg) {
+; GFX6-LABEL: abs_vgpr_i32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, 0, v0
+; GFX6-NEXT:    v_max_i32_e32 v0, v0, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: abs_vgpr_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_sub_u32_e32 v1, vcc, 0, v0
+; GFX8-NEXT:    v_max_i32_e32 v0, v0, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+  %res = call i32 @llvm.abs.i32(i32 %arg, i1 false)
+  ret i32 %res
+}
+
+define amdgpu_cs i64 @abs_vgpr_i64(i64 %arg) {
+; GFX6-LABEL: abs_vgpr_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
+; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GFX6-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: abs_vgpr_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v2, vcc
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+  %res = call i64 @llvm.abs.i64(i64 %arg, i1 false)
+  ret i64 %res
+}
+
+define amdgpu_cs <4 x i32> @abs_vgpr_v4i32(<4 x i32> %arg) {
+; GFX6-LABEL: abs_vgpr_v4i32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0, v0
+; GFX6-NEXT:    v_max_i32_e32 v0, v0, v4
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0, v1
+; GFX6-NEXT:    v_max_i32_e32 v1, v1, v4
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0, v2
+; GFX6-NEXT:    v_max_i32_e32 v2, v2, v4
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
+; GFX6-NEXT:    v_max_i32_e32 v3, v3, v4
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: abs_vgpr_v4i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, 0, v0
+; GFX8-NEXT:    v_max_i32_e32 v0, v0, v4
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, 0, v1
+; GFX8-NEXT:    v_max_i32_e32 v1, v1, v4
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, 0, v2
+; GFX8-NEXT:    v_max_i32_e32 v2, v2, v4
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, 0, v3
+; GFX8-NEXT:    v_max_i32_e32 v3, v3, v4
+; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX8-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX8-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX8-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX8-NEXT:    ; return to shader part epilog
+  %res = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %arg, i1 false)
+  ret <4 x i32> %res
+}
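
Note on the LegalizerHelper hunk above: widening G_ABS uses G_SEXT on the source (not G_ZEXT or G_ANYEXT) so that the absolute value computed at the wider width still truncates to the correct narrow result. As an illustrative sketch only, with invented register names that are not taken from any real output, an s16 G_ABS widened to s32 ends up roughly as:

  %wide:_(s32) = G_SEXT %x:_(s16)
  %wabs:_(s32) = G_ABS %wide
  %res:_(s16) = G_TRUNC %wabs

The sign extension is the same one visible in the i16 test checks above (s_sext_i32_i16 on the SGPR path, v_bfe_i32 on the GFX6 VGPR path).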
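Note on the AMDGPU lowering above: legalizeABS and the VGPR case in the register-bank code both rely on the identity abs(x) = smax(x, 0 - x), since there is no VALU abs instruction. A rough sketch of the generic MIR legalizeABS builds for an s32 value (again, register names are invented for illustration):

  %zero:_(s32) = G_CONSTANT i32 0
  %neg:_(s32) = G_SUB %zero, %x
  %abs:_(s32) = G_SMAX %x, %neg

After selection this becomes the v_sub_*/v_max_i32 pairs seen in the abs_vgpr_* checks, while SGPR-bank values select directly to s_abs_i32 through the new SOPInstructions.td pattern.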