diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -294,6 +294,12 @@
   bool applyBuildInstructionSteps(MachineInstr &MI,
                                   InstructionStepsMatchInfo &MatchInfo);
 
+  /// Match ashr (shl x, C), C -> sext_inreg (C)
+  bool matchAshrShlToSextInreg(MachineInstr &MI,
+                               std::tuple<Register, int64_t> &MatchInfo);
+  bool applyAshShlToSextInreg(MachineInstr &MI,
+                              std::tuple<Register, int64_t> &MatchInfo);
+
   /// Try to transform \p MI by using all of the above
   /// combine functions. Returns true if changed.
   bool tryCombine(MachineInstr &MI);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -251,6 +251,12 @@
   return BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>(L, R);
 }
 
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
+m_GAShr(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
+}
+
 // Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
 template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
   SrcTy L;
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -284,6 +284,15 @@
   (apply [{ return Helper.applyBuildInstructionSteps(*${root}, ${info});}])
 >;
 
+// Fold ashr (shl x, C), C -> sext_inreg (C)
+def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
+def shl_ashr_to_sext_inreg : GICombineRule<
+  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
+  (match (wip_match_opcode G_ASHR): $root,
+    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
+  (apply [{ return Helper.applyAshShlToSextInreg(*${root}, ${info});}])
+>;
+
 // FIXME: These should use the custom predicate feature once it lands.
 def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                      undef_to_negative_one,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1887,6 +1887,32 @@
   return true;
 }
 
+bool CombinerHelper::matchAshrShlToSextInreg(
+    MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
+  int64_t ShlCst, AshrCst;
+  Register Src;
+  if (!mi_match(MI.getOperand(0).getReg(), MRI,
+                m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
+    return false;
+  if (ShlCst != AshrCst)
+    return false;
+  MatchInfo = {Src, ShlCst};
+  return true;
+}
+bool CombinerHelper::applyAshShlToSextInreg(
+    MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
+  Register Src;
+  int64_t ShiftAmt;
+  std::tie(Src, ShiftAmt) = MatchInfo;
+  unsigned Size = MRI.getType(Src).getScalarSizeInBits();
+  Builder.setInstrAndDebugLoc(MI);
+  Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
+  MI.eraseFromParent();
+  return true;
+}
+
 bool CombinerHelper::tryCombine(MachineInstr &MI) {
   if (tryCombineCopy(MI))
     return true;
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -20,7 +20,8 @@
 def AArch64PreLegalizerCombinerHelper: GICombinerHelper<
   "AArch64GenPreLegalizerCombinerHelper", [all_combines,
                                            elide_br_by_inverting_cond,
-                                           fconstant_to_constant]> {
+                                           fconstant_to_constant,
+                                           shl_ashr_to_sext_inreg]> {
   let DisableRuleOption = "aarch64prelegalizercombiner-disable-rule";
   let StateClass = "AArch64PreLegalizerCombinerHelperState";
   let AdditionalArguments = [];
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ashr-shl-to-sext-inreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ashr-shl-to-sext-inreg.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ashr-shl-to-sext-inreg.mir
@@ -0,0 +1,62 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+---
+name:            ashr_shl_to_sext_inreg
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$w0' }
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: ashr_shl_to_sext_inreg
+    ; CHECK: liveins: $w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[TRUNC]], 8
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %1:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %1(s32)
+    %2:_(s16) = G_CONSTANT i16 8
+    %3:_(s16) = G_SHL %0, %2(s16)
+    %4:_(s16) = exact G_ASHR %3, %2(s16)
+    %5:_(s32) = G_ANYEXT %4(s16)
+    $w0 = COPY %5(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            different_shift_amts
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$w0' }
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: different_shift_amts
+    ; CHECK: liveins: $w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
+    ; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+    ; CHECK: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s16) = exact G_ASHR [[SHL]], [[C1]](s16)
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %1:_(s32) = COPY $w0
+    %0:_(s16) = G_TRUNC %1(s32)
+    %2:_(s16) = G_CONSTANT i16 12
+    %4:_(s16) = G_CONSTANT i16 8
+    %3:_(s16) = G_SHL %0, %2(s16)
+    %5:_(s16) = exact G_ASHR %3, %4(s16)
+    %6:_(s32) = G_ANYEXT %5(s16)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
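
A note on the transform itself, separate from the patch above: the fold rests on the identity ashr (shl x, C), C == sext_inreg(x, Size - C) for a Size-bit scalar, which is exactly what applyAshShlToSextInreg builds via Size - ShiftAmt. Below is a minimal standalone C++ sketch of that identity for the s16, C = 8 case exercised by the test; the helpers shlAshr16 and sextInReg16 are invented for the illustration and are not LLVM APIs.

#include <cassert>
#include <cstdint>

// ashr (shl x, C), C on an s16 value. The left shift is done on the
// unsigned bit pattern to sidestep signed-overflow UB; the right shift of
// a signed operand is arithmetic on mainstream targets, matching G_ASHR.
static int16_t shlAshr16(int16_t X, unsigned C) {
  int16_t Shifted = static_cast<int16_t>(static_cast<uint16_t>(X) << C);
  return static_cast<int16_t>(Shifted >> C);
}

// What G_SEXT_INREG %x, B computes: sign-extend the low B bits of %x to
// the full width, here via the usual xor/subtract sign-extension trick.
static int16_t sextInReg16(int16_t X, unsigned B) {
  unsigned Low = static_cast<uint16_t>(X) & ((1u << B) - 1);
  unsigned SignBit = 1u << (B - 1);
  return static_cast<int16_t>((Low ^ SignBit) - SignBit);
}

int main() {
  // The combine replaces ashr (shl x, 8), 8 on s16 with sext_inreg(x, 8),
  // i.e. B = Size - ShiftAmt = 16 - 8; the two agree on every s16 input.
  for (int V = INT16_MIN; V <= INT16_MAX; ++V) {
    int16_t X = static_cast<int16_t>(V);
    assert(shlAshr16(X, 8) == sextInReg16(X, 16 - 8));
  }
  return 0;
}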