Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -372,6 +372,9 @@
   bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
   void applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
 
+  /// Transform fabs(fneg(x)) to fabs(x).
+  bool matchCombineFAbsOfFNeg(MachineInstr &MI, BuildFnTy &MatchInfo);
+
   /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
   bool matchCombineTruncOfExt(MachineInstr &MI,
                               std::pair<Register, Register> &MatchInfo);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -519,6 +519,13 @@
   (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
 >;
 
+// Fold (fabs (fneg x)) -> (fabs x).
+def fabs_fneg_fold: GICombineRule <
+  (defs root:$root, build_fn_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_FABS):$root,
+         [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
 // Fold (unmerge cst) -> cst1, cst2, ...
 def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
 def unmerge_cst : GICombineRule<
@@ -754,7 +761,7 @@
     const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
     shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
     truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
-    form_bitfield_extract, constant_fold]>;
+    form_bitfield_extract, constant_fold, fabs_fneg_fold]>;
 
 // A combine group used to for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and
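Why the fold is safe, NaNs and signed zeros included: G_FNEG only flips the sign bit and G_FABS only clears it, so a negation feeding a fabs is unobservable. A minimal standalone sketch of that bit-level identity, assuming IEEE-754 binary32 semantics on the host (illustrative only, not part of the patch):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Compare results bit-for-bit; a plain == would mishandle NaN and -0.0.
static uint32_t bitsOf(float F) {
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(Bits));
  return Bits;
}

int main() {
  const float Cases[] = {0.0f, -0.0f, 1.5f, -2.25f,
                         std::numeric_limits<float>::infinity(),
                         -std::numeric_limits<float>::infinity(),
                         std::numeric_limits<float>::quiet_NaN()};
  for (float X : Cases) {
    // fneg flips the sign bit; fabs clears it. The flip is therefore
    // unobservable: fabs(-x) == fabs(x) for every bit pattern.
    assert(bitsOf(std::fabs(-X)) == bitsOf(std::fabs(X)));
  }
  return 0;
}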
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2131,6 +2131,22 @@
   return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
 }
 
+bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
+                                            BuildFnTy &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
+  Register Src = MI.getOperand(1).getReg();
+  Register NegSrc;
+  if (mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc)))) {
+    MatchInfo = [=, &MI](MachineIRBuilder &B) {
+      Observer.changingInstr(MI);
+      MI.getOperand(1).setReg(NegSrc);
+      Observer.changedInstr(MI);
+    };
+    return true;
+  }
+  return false;
+}
+
 bool CombinerHelper::matchCombineTruncOfExt(
     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
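The helper follows the combiner's two-phase protocol: the match function only inspects the MIR and stashes the rewrite in a BuildFnTy closure, which applyBuildFnNoErase runs later between the changingInstr/changedInstr observer notifications. Capturing NegSrc by value is what makes the deferred run safe. Below is a toy, LLVM-free sketch of that protocol; every type and name in it is invented for illustration and none of it is LLVM's real API:

#include <cassert>
#include <functional>
#include <string>
#include <vector>

// Toy stand-ins for MachineInstr and the register-def lookup that
// MachineRegisterInfo provides in the real combiner.
struct Instr {
  std::string Opcode;
  int Dst, Src;
};

using BuildFn = std::function<void(Instr &)>;

// Return the instruction defining virtual register Reg, if any.
static Instr *getDef(std::vector<Instr> &MF, int Reg) {
  for (Instr &I : MF)
    if (I.Dst == Reg)
      return &I;
  return nullptr;
}

// Match phase: read-only. On success, stash the rewrite in a closure
// that captures NegSrc by value, since the combiner runs it later,
// after the matcher's locals are gone.
static bool matchFAbsOfFNeg(std::vector<Instr> &MF, Instr &MI,
                            BuildFn &MatchInfo) {
  if (MI.Opcode != "G_FABS")
    return false;
  Instr *Def = getDef(MF, MI.Src);
  if (!Def || Def->Opcode != "G_FNEG")
    return false;
  int NegSrc = Def->Src;
  MatchInfo = [NegSrc](Instr &I) { I.Src = NegSrc; };
  return true;
}

int main() {
  // %0 = COPY ..., %1 = G_FNEG %0, %2 = G_FABS %1
  std::vector<Instr> MF = {
      {"COPY", 0, -1}, {"G_FNEG", 1, 0}, {"G_FABS", 2, 1}};
  BuildFn MatchInfo;
  if (matchFAbsOfFNeg(MF, MF[2], MatchInfo))
    MatchInfo(MF[2]); // apply phase: G_FABS now reads %0 directly
  assert(MF[2].Src == 0);
  return 0;
}

This also suggests why the rule uses applyBuildFnNoErase rather than erasing the root: the G_FABS is mutated in place, and the now-dead G_FNEG is left for later dead-code elimination.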
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fabs-fneg.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fabs-fneg.mir
@@ -0,0 +1,102 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: test_f16
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_f16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[TRUNC]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FABS]](s16)
+    ; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s16) = G_TRUNC %0:_(s32)
+    %2:_(s16) = G_FNEG %1:_
+    %3:_(s16) = G_FABS %2:_
+    %4:_(s32) = G_ANYEXT %3:_(s16)
+    $vgpr0 = COPY %4:_(s32)
+
+...
+---
+name: test_f32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_f32
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[COPY]]
+    ; CHECK-NEXT: $vgpr0 = COPY [[FABS]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_FNEG %0
+    %2:_(s32) = G_FABS %1
+    $vgpr0 = COPY %2(s32)
+
+...
+---
+name: test_f64
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: test_f64
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[COPY]]
+    ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[FABS]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_FNEG %0
+    %2:_(s64) = G_FABS %1
+    $vgpr0_vgpr1 = COPY %2(s64)
+
+...
+---
+name: test_v2f16
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_v2f16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(<2 x s16>) = G_FABS [[COPY]]
+    ; CHECK-NEXT: $vgpr0 = COPY [[FABS]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = G_FNEG %0
+    %2:_(<2 x s16>) = G_FABS %1
+    $vgpr0 = COPY %2(<2 x s16>)
+
+...
+---
+name: test_v3f32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2
+
+    ; CHECK-LABEL: name: test_v3f32
+    ; CHECK: liveins: $vgpr0_vgpr1_vgpr2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(<3 x s32>) = G_FABS [[COPY]]
+    ; CHECK-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[FABS]](<3 x s32>)
+    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+    %1:_(<3 x s32>) = G_FNEG %0
+    %2:_(<3 x s32>) = G_FABS %1
+    $vgpr0_vgpr1_vgpr2 = COPY %2(<3 x s32>)
+
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
@@ -563,18 +563,17 @@
 ; GFX6-LABEL: v_roundeven_f64_fneg:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_xor_b32_e32 v2, 0x80000000, v1
-; GFX6-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
-; GFX6-NEXT:    v_mov_b32_e32 v3, 0
-; GFX6-NEXT:    v_or_b32_e32 v4, 0x43300000, v4
-; GFX6-NEXT:    v_add_f64 v[5:6], -v[0:1], v[3:4]
+; GFX6-NEXT:    v_xor_b32_e32 v6, 0x80000000, v1
+; GFX6-NEXT:    v_and_b32_e32 v3, 0x80000000, v6
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; GFX6-NEXT:    v_or_b32_e32 v3, 0x43300000, v3
+; GFX6-NEXT:    v_add_f64 v[4:5], -v[0:1], v[2:3]
 ; GFX6-NEXT:    s_mov_b32 s4, -1
-; GFX6-NEXT:    v_mov_b32_e32 v1, v0
 ; GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; GFX6-NEXT:    v_add_f64 v[3:4], v[5:6], -v[3:4]
-; GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[1:2]|, s[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX6-NEXT:    v_add_f64 v[2:3], v[4:5], -v[2:3]
+; GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v6, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX7-LABEL: v_roundeven_f64_fneg:
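The MIR tests cover s16, s32, s64, <2 x s16>, and <3 x s32>, and the combine needs no per-type logic: both opcodes act lane-wise and the rewrite only swaps a source register. For intuition, the same sign-bit argument applied to a packed <2 x s16> value, mirroring the v2f16 test (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Lane-wise sign-bit ops on a packed <2 x s16> value, modeling how
// fneg/fabs behave per 16-bit half under IEEE-754 binary16.
static uint32_t fnegV2F16(uint32_t V) { return V ^ 0x80008000u; }
static uint32_t fabsV2F16(uint32_t V) { return V & 0x7fff7fffu; }

int main() {
  // Sample packed halves: -1.0 (0xBC00) in the low lane, a NaN
  // (0x7E00) in the high lane; the exact payloads do not matter.
  const uint32_t Samples[] = {0x7E00BC00u, 0x00000000u, 0x80008000u,
                              0xFFFFFFFFu};
  for (uint32_t V : Samples)
    // The inner negation only flips sign bits that fabs clears anyway.
    assert(fabsV2F16(fnegV2F16(V)) == fabsV2F16(V));
  return 0;
}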