Index: lib/Target/AArch64/AArch64RedundantCopyElimination.cpp =================================================================== --- lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -12,13 +12,14 @@ // CBZW %W0, // BB#2: // %W0 = COPY %WZR -// This pass should be run after register allocation. +// Similarly, this pass also handles non-zero copies. +// BB#0: +// cmp x0, #1 +// b.eq .LBB0_1 +// .LBB0_1: +// orr x0, xzr, #0x1 // -// FIXME: This should be extended to handle any constant other than zero. E.g., -// cmp w0, #1 -// b.eq .BB1 -// BB1: -// mov w0, #1 +// This pass should be run after register allocation. // // FIXME: This could also be extended to check the whole dominance subtree below // the comparison if the compile time regression is acceptable. @@ -43,7 +44,8 @@ class AArch64RedundantCopyElimination : public MachineFunctionPass { const MachineRegisterInfo *MRI; const TargetRegisterInfo *TRI; - BitVector ClobberedRegs; + BitVector CopyClobberedRegs; + BitVector KnownRegValClobberedRegs; public: static char ID; @@ -51,6 +53,15 @@ initializeAArch64RedundantCopyEliminationPass( *PassRegistry::getPassRegistry()); } + + typedef struct RegImm { + MCPhysReg Reg; + int64_t Imm; + RegImm(MCPhysReg Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {} + } RegImm_t; + + MachineInstr *knownRegValInBlock(MachineInstr &CondBr, + MachineBasicBlock *MBB); bool optimizeCopy(MachineBasicBlock *MBB); bool runOnMachineFunction(MachineFunction &MF) override; MachineFunctionProperties getRequiredProperties() const override { @@ -99,6 +110,80 @@ } } +/// Check if the basic block \p MBB is the target block to which a conditional +/// branch \p CondBr jumps and whose equality comparison is against a constant. +/// If true, return the compare instruction the branch is predicated upon. +/// Otherwise, return nullptr. 
+MachineInstr * +AArch64RedundantCopyElimination::knownRegValInBlock(MachineInstr &CondBr, + MachineBasicBlock *MBB) { + unsigned Opc = CondBr.getOpcode(); + // Must be a conditional branch. + if (Opc != AArch64::Bcc) + return nullptr; + + // Must be an equality check (i.e., == or !=). + AArch64CC::CondCode CC = (AArch64CC::CondCode)CondBr.getOperand(0).getImm(); + if (CC != AArch64CC::EQ && CC != AArch64CC::NE) + return nullptr; + + MachineBasicBlock *BrTarget = CondBr.getOperand(1).getMBB(); + if ((CC == AArch64CC::EQ && BrTarget != MBB) || + (CC == AArch64CC::NE && BrTarget == MBB)) + return nullptr; + + // Stop if we get to the beginning of PredMBB. + MachineBasicBlock *PredMBB = *MBB->pred_begin(); + assert(PredMBB == CondBr.getParent() && + "Conditional branch not in predecessor block!"); + if (CondBr == PredMBB->begin()) + return nullptr; + + // Track which registers have been clobbered. + KnownRegValClobberedRegs.reset(); + + // Find compare instruction that sets NZCV used by CondBr. + MachineBasicBlock::reverse_iterator RIt = CondBr.getReverseIterator(); + for (MachineInstr &PredI : make_range(std::next(RIt), PredMBB->rend())) { + switch (PredI.getOpcode()) { + default: + break; + + // CMP is an alias for SUBS with a dead destination register. + case AArch64::SUBSWri: + case AArch64::SUBSXri: { + unsigned SrcReg = PredI.getOperand(1).getReg(); + unsigned DestReg = PredI.getOperand(0).getReg(); + // Must not be a symbolic immediate. + if (!PredI.getOperand(2).isImm()) + return nullptr; + + // FIXME: For simplicity, give up on non-zero shifts. + if (PredI.getOperand(3).getImm()) + return nullptr; + + // The src register must not be modified between the cmp and conditional + // branch. This include a self-clobbering compare. + if (KnownRegValClobberedRegs[PredI.getOperand(1).getReg()] || + (DestReg != AArch64::WZR && DestReg != AArch64::XZR && + DestReg == SrcReg)) + return nullptr; + + // We've found the Cmp that sets NZCV. 
+ return &PredI; + } + } + + // Bail if we see an instruction that defines NZCV that we don't handle. + if (PredI.definesRegister(AArch64::NZCV)) + return nullptr; + + // Track clobbered registers. + trackRegDefs(PredI, KnownRegValClobberedRegs, TRI); + } + return nullptr; +} + bool AArch64RedundantCopyElimination::optimizeCopy(MachineBasicBlock *MBB) { // Check if the current basic block has a single predecessor. if (MBB->pred_size() != 1) @@ -110,46 +195,57 @@ if (PredMBB->succ_size() != 2) return false; - MachineBasicBlock::iterator CompBr = PredMBB->getLastNonDebugInstr(); - if (CompBr == PredMBB->end()) + MachineBasicBlock::iterator CondBr = PredMBB->getLastNonDebugInstr(); + if (CondBr == PredMBB->end()) return false; // Keep track of the earliest point in the PredMBB block where kill markers // need to be removed if a COPY is removed. MachineBasicBlock::iterator FirstUse; - // Registers that are known to contain zeros at the start of MBB. - SmallVector KnownZeroRegs; - // Registers clobbered in PredMBB between CompBr instruction and current + // Registers that contain a known value at the start of MBB. + SmallVector KnownRegs; + // Registers clobbered in PredMBB between CondBr instruction and current // instruction being checked in loop. - ClobberedRegs.reset(); - ++CompBr; + CopyClobberedRegs.reset(); + + MachineBasicBlock::iterator Itr = std::next(CondBr); do { - --CompBr; - if (!guaranteesZeroRegInBlock(*CompBr, MBB)) + --Itr; + if (guaranteesZeroRegInBlock(*Itr, MBB)) { + // Itr is a CBZ/CBNZ in this context. + MCPhysReg KnownReg = Itr->getOperand(0).getReg(); + KnownRegs.push_back(RegImm(KnownReg, 0)); + FirstUse = Itr; + } else if (MachineInstr *Cmp = knownRegValInBlock(*Itr, MBB)) { + // Itr is a conditional branch (i.e., Bcc) and Cmp is a compare + // (i.e., SUBS) in this context. 
+ MCPhysReg KnownReg = Cmp->getOperand(1).getReg(); + int64_t ImmVal = Cmp->getOperand(2).getImm(); + KnownRegs.push_back(RegImm(KnownReg, ImmVal)); + FirstUse = Itr; + } else continue; - KnownZeroRegs.push_back(CompBr->getOperand(0).getReg()); - FirstUse = CompBr; - // Look backward in PredMBB for COPYs from the known zero reg to - // find other registers that are known to be zero. - for (auto PredI = CompBr;; --PredI) { + // Look backward in PredMBB for COPYs from the known reg to find other + // registers that are known to be a constant value. + for (auto PredI = Itr;; --PredI) { if (PredI->isCopy()) { MCPhysReg CopyDstReg = PredI->getOperand(0).getReg(); MCPhysReg CopySrcReg = PredI->getOperand(1).getReg(); - for (MCPhysReg KnownZeroReg : KnownZeroRegs) { - if (ClobberedRegs[KnownZeroReg]) + for (auto &KnownReg : KnownRegs) { + if (CopyClobberedRegs[KnownReg.Reg]) continue; // If we have X = COPY Y, and Y is known to be zero, then now X is // known to be zero. - if (CopySrcReg == KnownZeroReg && !ClobberedRegs[CopyDstReg]) { - KnownZeroRegs.push_back(CopyDstReg); + if (CopySrcReg == KnownReg.Reg && !CopyClobberedRegs[CopyDstReg]) { + KnownRegs.push_back(RegImm(CopyDstReg, KnownReg.Imm)); FirstUse = PredI; break; } // If we have X = COPY Y, and X is known to be zero, then now Y is // known to be zero. - if (CopyDstReg == KnownZeroReg && !ClobberedRegs[CopySrcReg]) { - KnownZeroRegs.push_back(CopySrcReg); + if (CopyDstReg == KnownReg.Reg && !CopyClobberedRegs[CopySrcReg]) { + KnownRegs.push_back(RegImm(CopySrcReg, KnownReg.Imm)); FirstUse = PredI; break; } @@ -160,70 +257,86 @@ if (PredI == PredMBB->begin()) break; - trackRegDefs(*PredI, ClobberedRegs, TRI); + trackRegDefs(*PredI, CopyClobberedRegs, TRI); // Stop if all of the known-zero regs have been clobbered. 
- if (all_of(KnownZeroRegs, [&](MCPhysReg KnownZeroReg) { - return ClobberedRegs[KnownZeroReg]; + if (all_of(KnownRegs, [&](RegImm_t KnownReg) { + return CopyClobberedRegs[KnownReg.Reg]; })) break; } break; - } while (CompBr != PredMBB->begin() && CompBr->isTerminator()); + } while (Itr != PredMBB->begin() && Itr->isTerminator()); - // We've not found a known zero register, time to bail out. - if (KnownZeroRegs.empty()) + // We've not found a register with a known value, time to bail out. + if (KnownRegs.empty()) return false; bool Changed = false; - // UsedKnownZeroRegs is the set of KnownZeroRegs that have had uses added to MBB. - SmallSetVector UsedKnownZeroRegs; + // UsedKnownRegs is the set of KnownRegs that have had uses added to MBB. + SmallSetVector UsedKnownRegs; MachineBasicBlock::iterator LastChange = MBB->begin(); - // Remove redundant Copy instructions unless KnownZeroReg is modified. + // Remove redundant Copy instructions unless KnownReg is modified. for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) { MachineInstr *MI = &*I; ++I; - bool RemovedCopy = false; - if (MI->isCopy()) { + bool RemovedMI = false; + bool IsCopy = MI->isCopy(); + bool IsMoveImm = MI->isMoveImmediate(); + if (IsCopy || IsMoveImm) { MCPhysReg DefReg = MI->getOperand(0).getReg(); - MCPhysReg SrcReg = MI->getOperand(1).getReg(); + MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0; + int64_t SrcImm = IsMoveImm ? 
MI->getOperand(1).getImm() : 0; + if (!MRI->isReserved(DefReg) && + ((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) || + IsMoveImm)) { + for (RegImm_t &KnownReg : KnownRegs) { + if (KnownReg.Reg != DefReg && + !TRI->isSuperRegister(DefReg, KnownReg.Reg)) + continue; - if ((SrcReg == AArch64::XZR || SrcReg == AArch64::WZR) && - !MRI->isReserved(DefReg)) { - for (MCPhysReg KnownZeroReg : KnownZeroRegs) { - if (KnownZeroReg == DefReg || - TRI->isSuperRegister(DefReg, KnownZeroReg)) { - DEBUG(dbgs() << "Remove redundant Copy : " << *MI); + // For a copy, the known value must be a zero. + if (IsCopy && KnownReg.Imm != 0) + continue; - MI->eraseFromParent(); - Changed = true; - LastChange = I; - NumCopiesRemoved++; - UsedKnownZeroRegs.insert(KnownZeroReg); - RemovedCopy = true; - break; - } + // For a move immediate, the known immediate must match the source + // immediate. + if (IsMoveImm && KnownReg.Imm != SrcImm) + continue; + + if (IsCopy) + DEBUG(dbgs() << "Remove redundant Copy : " << *MI); + else + DEBUG(dbgs() << "Remove redundant Move : " << *MI); + + MI->eraseFromParent(); + Changed = true; + LastChange = I; + NumCopiesRemoved++; + UsedKnownRegs.insert(KnownReg.Reg); + RemovedMI = true; + break; } } } - // Skip to the next instruction if we removed the COPY from WZR/XZR. - if (RemovedCopy) + // Skip to the next instruction if we removed the COPY/MovImm. + if (RemovedMI) continue; - // Remove any regs the MI clobbers from the KnownZeroRegs set. + // Remove any regs the MI clobbers from the KnownRegs set. 
+ for (unsigned RI = 0; RI < KnownRegs.size();) + if (MI->modifiesRegister(KnownRegs[RI].Reg, TRI)) { + std::swap(KnownRegs[RI], KnownRegs[KnownRegs.size() - 1]); + KnownRegs.pop_back(); // Don't increment RI since we need to now check the swapped-in - // KnownZeroRegs[RI]. + // KnownRegs[RI]. } else { ++RI; } - // Continue until the KnownZeroRegs set is empty. - if (KnownZeroRegs.empty()) + // Continue until the KnownRegs set is empty. + if (KnownRegs.empty()) break; } @@ -232,9 +345,9 @@ // Add newly used regs to the block's live-in list if they aren't there // already. - for (MCPhysReg KnownZeroReg : UsedKnownZeroRegs) - if (!MBB->isLiveIn(KnownZeroReg)) - MBB->addLiveIn(KnownZeroReg); + for (MCPhysReg KnownReg : UsedKnownRegs) + if (!MBB->isLiveIn(KnownReg)) + MBB->addLiveIn(KnownReg); // Clear kills in the range where changes were made. This is conservative, // but should be okay since kill markers are being phased out. @@ -252,7 +365,12 @@ return false; TRI = MF.getSubtarget().getRegisterInfo(); MRI = &MF.getRegInfo(); - ClobberedRegs.resize(TRI->getNumRegs()); + + // Resize the clobber register bitfield trackers. We do this once per + // function and then clear the bitfield each time we optimize a copy. + CopyClobberedRegs.resize(TRI->getNumRegs()); + KnownRegValClobberedRegs.resize(TRI->getNumRegs()); + bool Changed = false; for (MachineBasicBlock &MBB : MF) Changed |= optimizeCopy(&MBB); Index: test/CodeGen/AArch64/machine-copy-remove.mir =================================================================== --- test/CodeGen/AArch64/machine-copy-remove.mir +++ test/CodeGen/AArch64/machine-copy-remove.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-copyelim %s -verify-machineinstrs -o - 2>&1 | FileCheck %s +# RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-copyelim %s -verify-machineinstrs -o - | FileCheck %s --- # Check that bb.0 COPY is seen through to allow the bb.1 COPY of XZR to be removed. 
# CHECK-LABEL: name: test1 @@ -293,3 +293,104 @@ RET_ReallyLR implicit %x0 ... +# CHECK-LABEL: name: test10 +# CHECK: bb.1: +# CHECK-NEXT: successors: %bb.3 +# CHECK-NEXT: liveins: %w0 +# CHECK-NOT: %w0 = MOVi32imm 1 +# CHECK: RET_ReallyLR +name: test10 +tracksRegLiveness: true +body: | + bb.0: + successors: %bb.1, %bb.2 + liveins: %w0, %x1 + + dead %wzr = SUBSWri killed %w0, 1, 0, implicit-def %nzcv + Bcc 1, %bb.2, implicit killed %nzcv + + bb.1: + successors: %bb.3 + + %w0 = MOVi32imm 1 ; MOVi32imm is redundant based on a dominating condition. + B %bb.3 + + bb.2: + successors: %bb.3 + liveins: %x1 + + %w0 = LDRWui killed %x1, 0 :: (load 4) + + bb.3: + liveins: %w0 + + RET_ReallyLR implicit %w0 + +... +# CHECK-LABEL: name: test11 +# CHECK: bb.1: +# CHECK-NEXT: successors: %bb.3 +# CHECK-NEXT: liveins: %x0 +# CHECK-NOT: %w0 = MOVi32imm 1 +# CHECK: RET_ReallyLR +name: test11 +tracksRegLiveness: true +body: | + bb.0: + successors: %bb.1, %bb.2 + liveins: %x0, %x1 + + dead %xzr = SUBSXri killed %x0, 1, 0, implicit-def %nzcv + Bcc 1, %bb.2, implicit killed %nzcv + + bb.1: + successors: %bb.3 + + %w0 = MOVi32imm 1, implicit-def %x0 ; MOVi32imm is redundant based on a dominating condition. + B %bb.3 + + bb.2: + successors: %bb.3 + liveins: %x1 + + %x0 = LDRXui killed %x1, 0 :: (load 8) + + bb.3: + liveins: %x0 + + RET_ReallyLR implicit %x0 +... +# Check that bb.0 COPY is seen through to allow the bb.1 MOVi32imm to be removed. +# CHECK-LABEL: name: test12 +# CHECK: bb.1: +# CHECK-NEXT: successors: %bb.3 +# CHECK-NEXT: liveins: %x0 +# CHECK-NOT: %w0 = MOVi32imm 1 +# CHECK: RET_ReallyLR +name: test12 +tracksRegLiveness: true +body: | + bb.0: + successors: %bb.1, %bb.2 + liveins: %x0, %x1 + + %x1 = COPY %x0 + dead %xzr = SUBSXri killed %x1, 1, 0, implicit-def %nzcv + Bcc 1, %bb.2, implicit killed %nzcv + + bb.1: + successors: %bb.3 + + %w0 = MOVi32imm 1, implicit-def %x0 ; MOVi32imm is redundant based on a dominating condition. 
+ B %bb.3 + + bb.2: + successors: %bb.3 + liveins: %x1 + + %x0 = LDRXui killed %x1, 0 :: (load 8) + + bb.3: + liveins: %x0 + + RET_ReallyLR implicit %x0