diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -95,7 +95,11 @@
   /// If \p MI is extend that consumes the result of a load, try to combine it.
   /// Returns true if MI changed.
   bool tryCombineExtendingLoads(MachineInstr &MI);
-  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
+  /// If \p EnableAnyExt is true then also try to match anyext(load).
+  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo,
+                                  bool EnableAnyExt = true);
+  bool matchCombineExtendingLoadsNoAnyExt(MachineInstr &MI,
+                                          PreferredTuple &MatchInfo);
   void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
 
   /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -112,8 +112,15 @@
   (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
          [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
   (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
+
 def combines_for_extload: GICombineGroup<[extending_loads]>;
 
+def extending_loads_no_anyext : GICombineRule<
+  (defs root:$root, extending_load_matchdata:$matchinfo),
+  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
+         [{ return Helper.matchCombineExtendingLoadsNoAnyExt(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
+
 def combine_indexed_load_store : GICombineRule<
   (defs root:$root, indexed_load_store_matchdata:$matchinfo),
   (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -357,8 +357,14 @@
   return false;
 }
 
+bool CombinerHelper::matchCombineExtendingLoadsNoAnyExt(
+    MachineInstr &MI, PreferredTuple &Preferred) {
+  return matchCombineExtendingLoads(MI, Preferred, false);
+}
+
 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
-                                                PreferredTuple &Preferred) {
+                                                PreferredTuple &Preferred,
+                                                bool EnableAnyExt) {
   // We match the loads and follow the uses to the extend instead of matching
   // the extends and following the def to the load. This is because the load
   // must remain in the same position for correctness (unless we also add code
@@ -405,7 +411,7 @@
   for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) {
     if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
-        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
+        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT && EnableAnyExt)) {
       Preferred = ChoosePreferredUse(Preferred,
                                      MRI.getType(UseMI.getOperand(0).getReg()),
                                      UseMI.getOpcode(), &UseMI);
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -24,7 +24,8 @@
   let DisableRuleOption = "aarch64prelegalizercombiner-disable-rule";
 }
 
-def AArch64PostLegalizerCombinerHelper: GICombinerHelper<
-  "AArch64GenPostLegalizerCombinerHelper", [erase_undef_store]> {
+def AArch64PostLegalizerCombinerHelper
+    : GICombinerHelper<"AArch64GenPostLegalizerCombinerHelper",
+                       [erase_undef_store, extending_loads_no_anyext]> {
   let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
 }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
@@ -0,0 +1,50 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-postlegalizer-combiner -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64--"
+  define void @test_zeroext(i8* %addr) {
+  entry:
+    ret void
+  }
+  define void @test_no_anyext(i8* %addr) {
+  entry:
+    ret void
+  }
+...
+
+---
+name: test_zeroext
+legalized: true
+body: |
+  bb.0.entry:
+    liveins: $x0
+    ; CHECK-LABEL: name: test_zeroext
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 1 from %ir.addr)
+    ; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
+    %0:_(p0) = COPY $x0
+    %1:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
+    %2:_(s32) = G_ZEXT %1
+    $w0 = COPY %2
+...
+
+---
+name: test_no_anyext
+legalized: true
+body: |
+  bb.0.entry:
+    liveins: $x0
+    ; Check that we don't try to do an anyext combine. We don't want to do this
+    ; because an anyextending load like s64 = G_LOAD %p (load 4) isn't legal.
+    ; CHECK-LABEL: name: test_no_anyext
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.addr)
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    %0:_(p0) = COPY $x0
+    %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
+    %2:_(s64) = G_ANYEXT %1
+    $x0 = COPY %2
+...
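For reference, this is the rewrite applyCombineExtendingLoads performs once a match succeeds, sketched as before/after generic MIR. This is an illustration mirroring the test_zeroext case above, not text from the patch itself:

  ; Before: the load and the extend are separate instructions.
  %1:_(s8)  = G_LOAD %0 :: (load 1 from %ir.addr)
  %2:_(s32) = G_ZEXT %1
  ; After: the extend has been folded into the load.
  %2:_(s32) = G_ZEXTLOAD %0 :: (load 1 from %ir.addr)

With extending_loads_no_anyext, the same fold still fires for G_SEXT and G_ZEXT users, but a G_ANYEXT user no longer triggers it: running after the legalizer, the combiner must not introduce an anyextending load (e.g. an s64 result for a 4-byte load) that is not legal, which is exactly what the test_no_anyext case guards against.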