Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -423,13 +423,14 @@
                                std::tuple<Register, int64_t> &MatchInfo);
   bool applyAshShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo);
+
   /// \return true if \p MI is a G_AND instruction whose RHS is a mask where
   /// LHS & mask == LHS. (E.g., an all-ones value.)
   ///
   /// \param [in] MI - The G_AND instruction.
   /// \param [out] Replacement - A register the G_AND should be replaced with on
   /// success.
-  bool matchAndWithTrivialMask(MachineInstr &MI, Register &Replacement);
+  bool matchAndWithRedundantMask(MachineInstr &MI, Register &Replacement);
 
   /// \return true if \p MI is a G_SEXT_INREG that can be erased.
   bool matchRedundantSExtInReg(MachineInstr &MI);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -381,12 +381,13 @@
     [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
   (apply [{ return Helper.applyAshShlToSextInreg(*${root}, ${info});}])
 >;
+
 // Fold (x & mask) -> x when (x & mask) is known to equal x.
-def and_trivial_mask_matchinfo : GIDefMatchData<"Register">;
-def and_trivial_mask: GICombineRule <
-  (defs root:$root, and_trivial_mask_matchinfo:$matchinfo),
+def and_redundant_mask_matchinfo : GIDefMatchData<"Register">;
+def and_redundant_mask: GICombineRule <
+  (defs root:$root, and_redundant_mask_matchinfo:$matchinfo),
   (match (wip_match_opcode G_AND):$root,
-    [{ return Helper.matchAndWithTrivialMask(*${root}, ${matchinfo}); }]),
+    [{ return Helper.matchAndWithRedundantMask(*${root}, ${matchinfo}); }]),
   (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
 >;
 
@@ -551,7 +552,7 @@
 def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p]>;
 
 def known_bits_simplifications : GICombineGroup<[
-  and_trivial_mask, redundant_sext_inreg]>;
+  and_redundant_mask, redundant_sext_inreg]>;
 
 def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>;
 
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2826,11 +2826,11 @@
   return true;
 }
 
-bool CombinerHelper::matchAndWithTrivialMask(MachineInstr &MI,
-                                             Register &Replacement) {
+bool CombinerHelper::matchAndWithRedundantMask(MachineInstr &MI,
+                                               Register &Replacement) {
   // Given
   //
-  // %mask:_(sN) = G_CONSTANT iN 000...0111...1
+  // %mask:_(sN) = G_CONSTANT iN %c
   // %x:_(sN) = G_SOMETHING
   // %y:_(sN) = G_AND %x, %mask
   //
@@ -2847,29 +2847,42 @@
   if (!KB)
     return false;
 
-  // Replacement = %x, AndDst = %y. Check that we can replace AndDst with the
-  // LHS of the G_AND.
-  Replacement = MI.getOperand(1).getReg();
   Register AndDst = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(AndDst);
 
   // FIXME: This should be removed once GISelKnownBits supports vectors.
   if (DstTy.isVector())
     return false;
-  if (!canReplaceReg(AndDst, Replacement, MRI))
-    return false;
 
-  // Check that we have a constant on the RHS of the G_AND, which is of the form
-  // 000...0111...1.
-  int64_t Cst;
-  if (!mi_match(MI.getOperand(2).getReg(), MRI, m_ICst(Cst)))
-    return false;
-  APInt Mask(DstTy.getSizeInBits(), Cst);
-  if (!Mask.isMask())
-    return false;
+  Register LHS = MI.getOperand(1).getReg();
+  Register RHS = MI.getOperand(2).getReg();
 
-  // Now, let's check that x & Mask == x. If this is true, then x & ~Mask == 0.
-  return KB->maskedValueIsZero(Replacement, ~Mask);
+  // While we need to know all bits of the Mask, for x we only need to know the
+  // bits that line up with zeros in the Mask.
+  KnownBits LHSBits = KB->getKnownBits(LHS);
+  KnownBits RHSBits = KB->getKnownBits(RHS);
+
+  // Check that x & Mask == x.
+  // x & 1 == x, always
+  // x & 0 == x, only if x is also 0
+  // Meaning Mask has no effect if all its zeros match zeros in x.
+  //
+  // Check that we can replace AndDst with the LHS of the G_AND and that all
+  // zero bits in RHS are also zero in LHS.
+  if (RHSBits.isConstant() && canReplaceReg(AndDst, LHS, MRI) &&
+      RHSBits.Zero.isSubsetOf(LHSBits.Zero)) {
+    Replacement = LHS;
+    return true;
+  }
+
+  // And is commutative, so we need to check both cases.
+  if (LHSBits.isConstant() && canReplaceReg(AndDst, RHS, MRI) &&
+      LHSBits.Zero.isSubsetOf(RHSBits.Zero)) {
+    Replacement = RHS;
+    return true;
+  }
+
+  return false;
 }
 
 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
Index: llvm/lib/Target/AArch64/AArch64Combine.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64Combine.td
+++ llvm/lib/Target/AArch64/AArch64Combine.td
@@ -110,6 +110,6 @@
                        [copy_prop, erase_undef_store, combines_for_extload,
                         sext_trunc_sextload,
                         hoist_logic_op_with_same_opcode_hands,
-                        and_trivial_mask, xor_of_and_with_same_reg]> {
+                        and_redundant_mask, xor_of_and_with_same_reg]> {
   let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
 }
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-redundant.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-redundant.mir
@@ -0,0 +1,119 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: test_const_const_lhs
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_const_const_lhs
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK: $sgpr0 = COPY [[C]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $sgpr0
+    %0:_(s32) = G_CONSTANT i32 15
+    %1:_(s32) = G_CONSTANT i32 255
+    %2:_(s32) = G_AND %0(s32), %1(s32)
+    $sgpr0 = COPY %2(s32)
+    SI_RETURN_TO_EPILOG implicit $sgpr0
+...
+
+---
+name: test_const_const_rhs
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_const_const_rhs
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK: $vgpr0 = COPY [[C]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = G_CONSTANT i32 15
+    %1:_(s32) = G_CONSTANT i32 255
+    %2:_(s32) = G_AND %1(s32), %0(s32)
+    $vgpr0 = COPY %2(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
+
+---
+name: test_and_consts
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_and_consts
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK: $sgpr0 = COPY [[C]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $sgpr0
+    %0:_(s32) = G_CONSTANT i32 15
+    %1:_(s32) = G_CONSTANT i32 255
+    %2:_(s32) = G_AND %0(s32), %1(s32)
+    $sgpr0 = COPY %2(s32)
+    SI_RETURN_TO_EPILOG implicit $sgpr0
+...
+
+---
+name: test_and_and
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_and_and
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK: $vgpr0 = COPY [[AND]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 15
+    %2:_(s32) = G_CONSTANT i32 255
+    %3:_(s32) = G_AND %0, %1(s32)
+    %4:_(s32) = G_AND %3, %2
+    $vgpr0 = COPY %4(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
+
+---
+name: test_shl_and
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $sgpr0
+
+    ; CHECK-LABEL: name: test_shl_and
+    ; CHECK: liveins: $sgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK: $sgpr0 = COPY [[SHL]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $sgpr0
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_CONSTANT i32 5
+    %2:_(s32) = G_CONSTANT i32 4294967264
+    %3:_(s32) = G_SHL %0, %1(s32)
+    %4:_(s32) = G_AND %3, %2
+    $sgpr0 = COPY %4(s32)
+    SI_RETURN_TO_EPILOG implicit $sgpr0
+...
+
+---
+name: test_lshr_and
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_lshr_and
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+    ; CHECK: $vgpr0 = COPY [[LSHR]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 5
+    %2:_(s32) = G_CONSTANT i32 134217727
+    %3:_(s32) = G_LSHR %0, %1(s32)
+    %4:_(s32) = G_AND %3, %2
+    $vgpr0 = COPY %4(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
@@ -5,7 +5,6 @@
 ; CHECK-LABEL: test_shl_and_1:
 ; CHECK:       ; %bb.0: ; %.entry
 ; CHECK-NEXT:    s_lshl_b32 s0, s0, 4
-; CHECK-NEXT:    s_and_b32 s0, s0, -16
 ; CHECK-NEXT:    ; return to shader part epilog
 .entry:
   %z1 = shl i32 %arg1, 2
@@ -18,7 +17,6 @@
 ; CHECK-LABEL: test_shl_and_2:
 ; CHECK:       ; %bb.0: ; %.entry
 ; CHECK-NEXT:    s_lshl_b32 s0, s0, 8
-; CHECK-NEXT:    s_and_b32 s0, s0, 0xffffff00
 ; CHECK-NEXT:    ; return to shader part epilog
 .entry:
   %z1 = shl i32 %arg1, 5
@@ -44,7 +42,6 @@
 ; CHECK-LABEL: test_lshr_and_1:
 ; CHECK:       ; %bb.0: ; %.entry
 ; CHECK-NEXT:    s_lshr_b32 s0, s0, 4
-; CHECK-NEXT:    s_and_b32 s0, s0, 0xfffffff
 ; CHECK-NEXT:    ; return to shader part epilog
 .entry:
   %z1 = lshr i32 %arg1, 2
@@ -70,7 +67,6 @@
 ; CHECK-LABEL: test_lshr_and_3:
 ; CHECK:       ; %bb.0: ; %.entry
 ; CHECK-NEXT:    s_lshr_b32 s0, s0, 5
-; CHECK-NEXT:    s_and_b32 s0, s0, 0x7ffffff
 ; CHECK-NEXT:    ; return to shader part epilog
 .entry:
   %z1 = lshr i32 %arg1, 3
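
Background note on the new check: the G_AND is redundant whenever every bit that is zero in the constant mask is already known to be zero in the other operand, because the AND then has no bit left to clear. Below is a minimal standalone C++ sketch of that argument. It deliberately uses plain uint32_t instead of the APInt/KnownBits types the patch uses, and the constants are taken from the test_shl_and MIR test above; it is an illustration, not part of the patch.

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  // x = (s0 << 5): the shift guarantees the low five bits of x are zero.
  const uint32_t KnownZeroOfX = 0x0000001Fu; // bits known to be 0 in x
  const uint32_t Mask = 0xFFFFFFE0u;         // G_CONSTANT i32 4294967264

  // Every zero bit of Mask is a known-zero bit of x. This mirrors the
  // RHSBits.Zero.isSubsetOf(LHSBits.Zero) check in the patch, where
  // A.isSubsetOf(B) holds exactly when (A & ~B) == 0.
  const uint32_t MaskZeros = ~Mask;
  assert((MaskZeros & ~KnownZeroOfX) == 0 && "mask zeros must be known zeros of x");

  // Under that condition the G_AND is redundant: x & Mask == x for every x
  // this program point can produce.
  for (uint32_t S0 : {0u, 1u, 0x12345678u, 0xFFFFFFFFu}) {
    uint32_t X = S0 << 5;
    assert((X & Mask) == X);
  }
  return 0;
}

The same subset argument, with the operands swapped, justifies the commuted case handled by the second if statement in matchAndWithRedundantMask.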