Index: include/llvm/CodeGen/MachineCombinerPattern.h
===================================================================
--- /dev/null
+++ include/llvm/CodeGen/MachineCombinerPattern.h
@@ -0,0 +1,29 @@
+//===-- llvm/CodeGen/MachineCombinerPattern.h - Instruction patterns
+// supported by the machine combiner ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the instruction patterns supported by the machine
+// combiner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+#define LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+
+namespace llvm {
+
+/// Enumeration of the instruction patterns supported by the machine combiner.
+namespace MachineCombinerPattern {
+// Forward declaration
+enum MC_PATTERN : int;
+} // end namespace MachineCombinerPattern
+} // end namespace llvm
+
+#endif
Index: include/llvm/CodeGen/MachineTraceMetrics.h
===================================================================
--- include/llvm/CodeGen/MachineTraceMetrics.h
+++ include/llvm/CodeGen/MachineTraceMetrics.h
@@ -264,8 +264,9 @@
   /// classes are included. For the caller to account for extra machine
   /// instructions, it must first resolve each instruction's scheduling class.
   unsigned getResourceLength(
-      ArrayRef<const MachineBasicBlock *> Extrablocks = None,
-      ArrayRef<const MCSchedClassDesc *> ExtraInstrs = None) const;
+      ArrayRef<const MachineBasicBlock *> Extrablocks = None,
+      ArrayRef<const MCSchedClassDesc *> ExtraInstrs = None,
+      ArrayRef<const MCSchedClassDesc *> RemoveInstrs = None) const;
 
   /// Return the length of the (data dependency) critical path through the
   /// trace.
@@ -286,6 +287,12 @@
   /// Return the Depth of a PHI instruction in a trace center block successor.
   /// The PHI does not have to be part of the trace.
   unsigned getPHIDepth(const MachineInstr *PHI) const;
+
+  /// A dependence is useful if the basic block of the defining instruction
+  /// is part of the trace of the user instruction. It is assumed that DefMI
+  /// dominates UseMI (see also isUsefulDominator).
+  bool isDepInTrace(const MachineInstr *DefMI,
+                    const MachineInstr *UseMI) const;
 };
 
 /// A trace ensemble is a collection of traces selected using the same
Index: include/llvm/CodeGen/Passes.h
===================================================================
--- include/llvm/CodeGen/Passes.h
+++ include/llvm/CodeGen/Passes.h
@@ -489,6 +489,10 @@
   /// inserting cmov instructions.
   extern char &EarlyIfConverterID;
 
+  /// This pass performs instruction combining using trace metrics to estimate
+  /// critical-path and resource depth.
+  extern char &MachineCombinerID;
+
   /// StackSlotColoring - This pass performs stack coloring and merging.
   /// It merges disjoint allocas to reduce the stack size.
   extern char &StackColoringID;
Index: include/llvm/CodeGen/TargetSchedule.h
===================================================================
--- include/llvm/CodeGen/TargetSchedule.h
+++ include/llvm/CodeGen/TargetSchedule.h
@@ -167,6 +167,7 @@
   /// if converter after moving it to TargetSchedModel).
   unsigned computeInstrLatency(const MachineInstr *MI,
                                bool UseDefaultDefLatency = true) const;
+  unsigned computeInstrLatency(unsigned Opcode) const;
 
   /// \brief Output dependency latency of a pair of defs of the same register.
 ///
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -277,6 +277,7 @@
 void initializeBBVectorizePass(PassRegistry&);
 void initializeMachineFunctionPrinterPassPass(PassRegistry&);
 void initializeStackMapLivenessPass(PassRegistry&);
+void initializeMachineCombinerPass(PassRegistry &);
 void initializeLoadCombinePass(PassRegistry&);
 }
Index: include/llvm/Target/TargetInstrInfo.h
===================================================================
--- include/llvm/Target/TargetInstrInfo.h
+++ include/llvm/Target/TargetInstrInfo.h
@@ -15,9 +15,12 @@
 #define LLVM_TARGET_TARGETINSTRINFO_H
 
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/DenseMap.h"
 #include "llvm/CodeGen/DFAPacketizer.h"
 #include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineCombinerPattern.h"
 #include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
 
 namespace llvm {
 
@@ -563,6 +566,42 @@
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr *LoadMI) const;
 
+  /// hasPattern - return true when there is potentially a faster code sequence
+  /// for an instruction chain ending in \p Root. All potential patterns are
+  /// returned in the \p Pattern vector. Patterns should be sorted in priority
+  /// order since the pattern evaluator stops checking as soon as it finds a
+  /// faster sequence.
+  /// \param Root - Instruction that could be combined with one of its operands
+  /// \param Pattern - Vector of possible combination patterns
+  virtual bool hasPattern(
+      MachineInstr &Root,
+      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
+    return false;
+  }
+
+  /// genAlternativeCodeSequence - when hasPattern() finds a pattern this
+  /// function generates the instructions that could replace the original code
+  /// sequence. The client has to decide whether the actual replacement is
+  /// beneficial or not.
+  /// \param Root - Instruction that could be combined with one of its operands
+  /// \param P - Combination pattern for Root
+  /// \param InsInstrs - Vector of new instructions that implement P
+  /// \param DelInstrs - Old instructions, including Root, that could be
+  /// replaced by InsInstrs
+  /// \param InstrIdxForVirtReg - map of virtual register to instruction in
+  /// InsInstrs that defines it
+  virtual void genAlternativeCodeSequence(
+      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
+      SmallVectorImpl<MachineInstr *> &InsInstrs,
+      SmallVectorImpl<MachineInstr *> &DelInstrs,
+      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
+    return;
+  }
+
+  /// useMachineCombiner - return true when a target supports MachineCombiner
+  virtual bool useMachineCombiner(void) const { return false; }
+
 protected:
   /// foldMemoryOperandImpl - Target-dependent implementation for
   /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
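These three hooks are the entire interface a target has to implement to opt in
to the combiner. As a rough sketch of an override (illustrative only;
MyTargetInstrInfo, MyTarget::ADDrr and MC_MYPATTERN are hypothetical names, not
part of this patch):

    // Illustrative sketch, not part of this patch.
    bool MyTargetInstrInfo::hasPattern(
        MachineInstr &Root,
        SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
      // Only roots of a known shape are worth inspecting.
      if (Root.getOpcode() != MyTarget::ADDrr)
        return false;
      // Push candidates in priority order; the evaluator stops at the first
      // sequence it judges profitable.
      Pattern.push_back(MachineCombinerPattern::MC_MYPATTERN);
      return true;
    }

The AArch64 implementation later in this patch follows exactly this structure.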
Index: lib/CodeGen/CMakeLists.txt
===================================================================
--- lib/CodeGen/CMakeLists.txt
+++ lib/CodeGen/CMakeLists.txt
@@ -49,6 +49,7 @@
   MachineBranchProbabilityInfo.cpp
   MachineCSE.cpp
   MachineCodeEmitter.cpp
+  MachineCombiner.cpp
   MachineCopyPropagation.cpp
   MachineDominators.cpp
   MachineDominanceFrontier.cpp
Index: lib/CodeGen/CodeGen.cpp
===================================================================
--- lib/CodeGen/CodeGen.cpp
+++ lib/CodeGen/CodeGen.cpp
@@ -41,6 +41,7 @@
   initializeMachineBlockPlacementPass(Registry);
   initializeMachineBlockPlacementStatsPass(Registry);
   initializeMachineCopyPropagationPass(Registry);
+  initializeMachineCombinerPass(Registry);
   initializeMachineCSEPass(Registry);
   initializeMachineDominatorTreePass(Registry);
   initializeMachinePostDominatorTreePass(Registry);
Index: lib/CodeGen/MachineCombiner.cpp
===================================================================
--- /dev/null
+++ lib/CodeGen/MachineCombiner.cpp
@@ -0,0 +1,434 @@
+//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The machine combiner pass uses machine trace metrics to ensure that the
+// combined instructions do not lengthen the critical path or the resource
+// depth.
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "machine-combiner"
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineTraceMetrics.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+using namespace llvm;
+
+STATISTIC(NumInstCombined, "Number of machine insts combined");
+
+namespace {
+class MachineCombiner : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+  const MCSchedModel *SchedModel;
+  MachineRegisterInfo *MRI;
+  MachineTraceMetrics *Traces;
+  MachineTraceMetrics::Ensemble *MinInstr;
+
+  TargetSchedModel TSchedModel;
+
+  /// OptSize - True if optimizing for code size.
+  bool OptSize;
+
+public:
+  static char ID;
+  MachineCombiner() : MachineFunctionPass(ID) {
+    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
+  }
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  const char *getPassName() const override { return "Machine InstCombiner"; }
+
+private:
+  bool doSubstitute(unsigned NewSize, unsigned OldSize);
+  bool combineInstructions(MachineBasicBlock *);
+  MachineInstr *getOperandDef(const MachineOperand &MO);
+  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
+                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
+                    MachineTraceMetrics::Trace BlockTrace);
+  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
+                      MachineTraceMetrics::Trace BlockTrace);
+  bool
+  preservesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
+                           MachineTraceMetrics::Trace BlockTrace,
+                           SmallVectorImpl<MachineInstr *> &InsInstrs,
+                           DenseMap<unsigned, unsigned> &InstrIdxForVirtReg);
+  bool preservesResourceLen(MachineBasicBlock *MBB,
+                            MachineTraceMetrics::Trace BlockTrace,
+                            SmallVectorImpl<MachineInstr *> &InsInstrs,
+                            SmallVectorImpl<MachineInstr *> &DelInstrs);
+  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
+                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
+};
+}
+
+char MachineCombiner::ID = 0;
+char &llvm::MachineCombinerID = MachineCombiner::ID;
+
+INITIALIZE_PASS_BEGIN(MachineCombiner, "machine-combiner",
+                      "Machine InstCombiner", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
+INITIALIZE_PASS_END(MachineCombiner, "machine-combiner", "Machine InstCombiner",
+                    false, false)
+
+void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.setPreservesCFG();
+  AU.addPreserved<MachineDominatorTree>();
+  AU.addPreserved<MachineLoopInfo>();
+  AU.addRequired<MachineTraceMetrics>();
+  AU.addPreserved<MachineTraceMetrics>();
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
+  MachineInstr *DefInstr = nullptr;
+  // We need a virtual register definition.
+  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
+  // PHIs have no depth etc.
+  if (DefInstr && DefInstr->isPHI())
+    DefInstr = nullptr;
+  return DefInstr;
+}
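The core of the cost model comes next. The recurrence getDepth() implements is
simple to state in isolation: the depth of each new instruction is the maximum
over its register uses of the def's depth plus the def-to-use latency. A
minimal, self-contained restatement (illustrative only; the real function
additionally resolves each def through either InstrIdxForVirtReg or the trace):

    // Depth of one new instruction, given the depth and the def->use latency
    // of each of its register-use operands (illustrative sketch).
    static unsigned depthOfNewInstr(ArrayRef<unsigned> UseDepths,
                                    ArrayRef<unsigned> UseLatencies) {
      unsigned Depth = 0;
      for (unsigned I = 0, E = UseDepths.size(); I != E; ++I)
        Depth = std::max(Depth, UseDepths[I] + UseLatencies[I]);
      return Depth;
    }

For example, one operand at depth 3 with latency 2 and another at depth 0 with
latency 1 put the new instruction at depth max(5, 1) = 5.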
+/// getDepth - Computes depth of instructions in vector \p InsInstrs.
+///
+/// \param InsInstrs is a vector of machine instructions
+/// \param InstrIdxForVirtReg is a dense map from virtual register to the index
+/// of the defining machine instruction in \p InsInstrs
+/// \param BlockTrace is a trace of machine instructions
+///
+/// \returns Depth of the last instruction in \p InsInstrs ("NewRoot")
+unsigned
+MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
+                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
+                          MachineTraceMetrics::Trace BlockTrace) {
+
+  SmallVector<unsigned, 16> InstrDepth;
+  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
+
+  // For each instruction in the new sequence compute the depth based on the
+  // operands. Use the trace information when possible. For new operands which
+  // are tracked in the InstrIdxForVirtReg map, the depth is looked up in
+  // InstrDepth.
+  for (auto *InstrPtr : InsInstrs) { // for each Use
+    unsigned IDepth = 0;
+    DEBUG(dbgs() << "NEW INSTR "; InstrPtr->dump(); dbgs() << "\n";);
+    for (unsigned i = 0, e = InstrPtr->getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = InstrPtr->getOperand(i);
+      // Check for virtual register operand.
+      if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
+        continue;
+      if (!MO.isUse())
+        continue;
+      unsigned DepthOp = 0;
+      unsigned LatencyOp = 0;
+      DenseMap<unsigned, unsigned>::iterator II =
+          InstrIdxForVirtReg.find(MO.getReg());
+      if (II != InstrIdxForVirtReg.end()) {
+        // Operand is a new virtual register not in the trace.
+        assert(II->second < InstrDepth.size() && "Bad Index");
+        MachineInstr *DefInstr = InsInstrs[II->second];
+        assert(DefInstr &&
+               "There must be a definition for a new virtual register");
+        DepthOp = InstrDepth[II->second];
+        LatencyOp = TSchedModel.computeOperandLatency(
+            DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
+            InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
+      } else {
+        MachineInstr *DefInstr = getOperandDef(MO);
+        if (DefInstr) {
+          DepthOp = BlockTrace.getInstrCycles(DefInstr).Depth;
+          LatencyOp = TSchedModel.computeOperandLatency(
+              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
+              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
+        }
+      }
+      IDepth = std::max(IDepth, DepthOp + LatencyOp);
+    }
+    InstrDepth.push_back(IDepth);
+  }
+  unsigned NewRootIdx = InsInstrs.size() - 1;
+  return InstrDepth[NewRootIdx];
+}
+
+/// getLatency - Computes instruction latency as the max of the latencies of
+/// its defined operands.
+///
+/// \param Root is a machine instruction that could be replaced by NewRoot.
+/// It is used to compute more accurate latency information for NewRoot in
+/// case there is a dependent instruction in the same trace (\p BlockTrace)
+/// \param NewRoot is the instruction for which the latency is computed
+/// \param BlockTrace is a trace of machine instructions
+///
+/// \returns Latency of \p NewRoot
+unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
+                                     MachineTraceMetrics::Trace BlockTrace) {
+
+  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
+
+  // Check each definition in NewRoot and compute the latency
+  unsigned NewRootLatency = 0;
+
+  for (unsigned i = 0, e = NewRoot->getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = NewRoot->getOperand(i);
+    // Check for virtual register operand.
+    if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
+      continue;
+    if (!MO.isDef())
+      continue;
+    // Get the first instruction that uses MO
+    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
+    RI++;
+    MachineInstr *UseMO = RI->getParent();
+    unsigned LatencyOp = 0;
+    if (UseMO && BlockTrace.isDepInTrace(Root, UseMO)) {
+      LatencyOp = TSchedModel.computeOperandLatency(
+          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
+          UseMO->findRegisterUseOperandIdx(MO.getReg()));
+    } else {
+      LatencyOp = TSchedModel.computeInstrLatency(NewRoot->getOpcode());
+    }
+    NewRootLatency = std::max(NewRootLatency, LatencyOp);
+  }
+  return NewRootLatency;
+}
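getDepth() and getLatency() feed the profitability check defined next, which
reduces to a single comparison. A worked sketch (illustrative restatement, not
part of the patch):

    // The new root may finish no later than the old root plus its slack.
    static bool fitsCriticalPath(unsigned NewRootDepth, unsigned NewRootLatency,
                                 unsigned RootDepth, unsigned RootLatency,
                                 unsigned RootSlack) {
      return NewRootDepth + NewRootLatency <=
             RootDepth + RootLatency + RootSlack;
    }

For instance, with NewRootDepth = 1, NewRootLatency = 5, RootDepth = 4,
RootLatency = 2 and RootSlack = 1, the check is 6 <= 7 and the replacement is
accepted; raise NewRootLatency to 7 and it is rejected.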
+/// preservesCriticalPathLen - True when the new instruction sequence does not
+/// lengthen the critical path. The DAGCombine code sequence ends in MI
+/// (Machine Instruction) Root. The new code sequence ends in MI NewRoot. A
+/// necessary condition for the new sequence to replace the old sequence is
+/// that it cannot lengthen the critical path. This is decided by the formula
+/// (NewRootDepth + NewRootLatency) <= (RootDepth + RootLatency + RootSlack).
+/// The slack is the number of cycles Root can be delayed before the critical
+/// path becomes longer.
+bool MachineCombiner::preservesCriticalPathLen(
+    MachineBasicBlock *MBB, MachineInstr *Root,
+    MachineTraceMetrics::Trace BlockTrace,
+    SmallVectorImpl<MachineInstr *> &InsInstrs,
+    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
+
+  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
+  // NewRoot is the last instruction in the \p InsInstrs vector.
+  // Get depth and latency of NewRoot.
+  unsigned NewRootIdx = InsInstrs.size() - 1;
+  MachineInstr *NewRoot = InsInstrs[NewRootIdx];
+  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
+  unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
+
+  // Get depth, latency and slack of Root.
+  unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
+  unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
+  unsigned RootSlack = BlockTrace.getInstrSlack(Root);
+
+  DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
+        dbgs() << " NewRootDepth: " << NewRootDepth
+               << " NewRootLatency: " << NewRootLatency << "\n";
+        dbgs() << " RootDepth: " << RootDepth << " RootLatency: " << RootLatency
+               << " RootSlack: " << RootSlack << "\n";
+        dbgs() << " NewRootDepth + NewRootLatency "
+               << NewRootDepth + NewRootLatency << "\n";
+        dbgs() << " RootDepth + RootLatency + RootSlack "
+               << RootDepth + RootLatency + RootSlack << "\n";);
+
+  // True when the new sequence does not lengthen the critical path.
+  return ((NewRootDepth + NewRootLatency) <=
+          (RootDepth + RootLatency + RootSlack));
+}
+
+/// Helper routine to convert instructions into their scheduling classes.
+void MachineCombiner::instr2instrSC(
+    SmallVectorImpl<MachineInstr *> &Instrs,
+    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
+  for (auto *InstrPtr : Instrs) {
+    unsigned Opc = InstrPtr->getOpcode();
+    unsigned Idx = TII->get(Opc).getSchedClass();
+    const MCSchedClassDesc *SC = SchedModel->getSchedClassDesc(Idx);
+    InstrsSC.push_back(SC);
+  }
+}
+
+/// preservesResourceLen - True when the new instructions do not increase
+/// resource length
+bool MachineCombiner::preservesResourceLen(
+    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
+    SmallVectorImpl<MachineInstr *> &InsInstrs,
+    SmallVectorImpl<MachineInstr *> &DelInstrs) {
+
+  // Compute current resource length
+  ArrayRef<const MachineBasicBlock *> MBBarr(MBB);
+  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);
+
+  // Deal with scheduling classes rather than instructions.
+  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
+  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;
+
+  instr2instrSC(InsInstrs, InsInstrsSC);
+  instr2instrSC(DelInstrs, DelInstrsSC);
+
+  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
+  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);
+
+  // Compute new resource length
+  unsigned ResLenAfterCombine =
+      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);
+
+  DEBUG(dbgs() << "RESOURCE DATA: \n";
+        dbgs() << " resource len before: " << ResLenBeforeCombine
+               << " after: " << ResLenAfterCombine << "\n";);
+
+  return ResLenAfterCombine <= ResLenBeforeCombine;
+}
+
+/// \returns true when the new instruction sequence should be generated
+/// independent of whether it lengthens the critical path or not
+bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
+  if (OptSize && (NewSize < OldSize))
+    return true;
+  if (!TSchedModel.hasInstrSchedModel())
+    return true;
+  return false;
+}
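Before the driver loop, a source-level picture of the trade-off it arbitrates
(illustrative C++; whether the fused form wins depends on the target's machine
model):

    // The SelectionDAG combine used to fuse this into madd unconditionally.
    // The machine combiner keeps the separate mul + add when c arrives late,
    // i.e. when the add sits on the critical path and a longer-latency madd
    // would stretch it.
    int maddCandidate(int a, int b, int c) { return a * b + c; }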
+/// combineInstructions - substitute a slow code sequence with a faster one by
+/// evaluating instruction combining patterns.
+/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
+/// combining based on machine trace metrics. Only combine a sequence of
+/// instructions when this neither lengthens the critical path nor increases
+/// resource pressure. When optimizing for code size always combine when the
+/// new sequence is shorter.
+bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
+  bool Changed = false;
+  DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");
+
+  auto BlockIter = MBB->begin();
+
+  while (BlockIter != MBB->end()) {
+    auto &MI = *BlockIter++;
+
+    DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
+    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Pattern;
+    // The motivating example is:
+    //
+    //     MUL  Other        MUL_op1 MUL_op2  Other
+    //      \    /               \      |    /
+    //      ADD/SUB      =>        MADD/MSUB
+    //      (=Root)                (=NewRoot)
+
+    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
+    // usually beneficial for code size it unfortunately can hurt performance
+    // when the ADD is on the critical path, but the MUL is not. With the
+    // substitution the MUL becomes part of the critical path (in form of the
+    // MADD) and can lengthen it on architectures where the MADD latency is
+    // longer than the ADD latency.
+    //
+    // For each instruction we check if it can be the root of a combiner
+    // pattern. Then for each pattern the new code sequence in form of MI is
+    // generated and evaluated. When the efficiency criteria (don't lengthen
+    // the critical path, don't use more resources) are met, the new sequence
+    // gets hooked up into the basic block before the old sequence is removed.
+    //
+    // The algorithm does not try to evaluate all patterns and pick the best.
+    // This is only an artificial restriction though. In practice there is
+    // mostly one pattern, and hasPattern() can order patterns based on an
+    // internal cost heuristic.
+
+    if (TII->hasPattern(MI, Pattern)) {
+      for (auto P : Pattern) {
+        SmallVector<MachineInstr *, 16> InsInstrs;
+        SmallVector<MachineInstr *, 16> DelInstrs;
+        DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
+        if (!MinInstr)
+          MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
+        Traces->verifyAnalysis();
+        TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
+                                        InstrIdxForVirtReg);
+        // Found pattern, but did not generate an alternative sequence.
+        // This can happen e.g. when an immediate could not be materialized
+        // in a single instruction.
+        if (!InsInstrs.size())
+          continue;
+        // Substitute when we optimize for code size and the new sequence has
+        // fewer instructions OR
+        // the new sequence neither lengthens the critical path nor increases
+        // resource pressure.
+        if (doSubstitute(InsInstrs.size(), DelInstrs.size()) ||
+            (preservesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
+                                      InstrIdxForVirtReg) &&
+             preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
+          for (auto *InstrPtr : InsInstrs)
+            MBB->insert((MachineBasicBlock::iterator)&MI,
+                        (MachineInstr *)InstrPtr);
+          for (auto *InstrPtr : DelInstrs)
+            InstrPtr->eraseFromParent();
+
+          Changed = true;
+          ++NumInstCombined;
+
+          Traces->invalidate(MBB);
+          Traces->verifyAnalysis();
+          // Eagerly stop after the first pattern fires.
+          break;
+        } else {
+          // Clean up the instructions of the alternative code sequence. There
+          // is no use for them.
+          for (auto *InstrPtr : InsInstrs) {
+            MachineFunction *MF = MBB->getParent();
+            MF->DeleteMachineInstr((MachineInstr *)InstrPtr);
+          }
+        }
+        InstrIdxForVirtReg.clear();
+      }
+    }
+  }
+
+  return Changed;
+}
+
+bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
+  TII = MF.getTarget().getInstrInfo();
+  TRI = MF.getTarget().getRegisterInfo();
+  const TargetSubtargetInfo &STI =
+      MF.getTarget().getSubtarget<TargetSubtargetInfo>();
+  SchedModel = STI.getSchedModel();
+  TSchedModel.init(*SchedModel, &STI, TII);
+  MRI = &MF.getRegInfo();
+  Traces = &getAnalysis<MachineTraceMetrics>();
+  MinInstr = nullptr;
+
+  OptSize = MF.getFunction()->getAttributes().hasAttribute(
+      AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+
+  DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
+  if (!TII->useMachineCombiner()) {
+    DEBUG(dbgs()
+          << "  Skipping pass: Target does not support machine combiner\n");
+    return false;
+  }
+
+  bool Changed = false;
+
+  // Try to combine instructions.
+  for (auto &MBB : MF)
+    Changed |= combineInstructions(&MBB);
+
+  return Changed;
+}
Index: lib/CodeGen/MachineScheduler.cpp
===================================================================
--- lib/CodeGen/MachineScheduler.cpp
+++ lib/CodeGen/MachineScheduler.cpp
@@ -40,6 +40,9 @@
                           cl::desc("Force top-down list scheduling"));
 cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                             cl::desc("Force bottom-up list scheduling"));
+cl::opt<bool>
+DumpCriticalPathLength("misched-dcpl", cl::Hidden,
+                       cl::desc("Print critical path length to stdout"));
 }
 
 #ifndef NDEBUG
@@ -451,6 +454,11 @@
             else dbgs() << "End";
             dbgs() << " RegionInstrs: " << NumRegionInstrs
                    << " Remaining: " << RemainingInstrs << "\n");
+      if (DumpCriticalPathLength) {
+        errs() << MF->getName();
+        errs() << ":BB# " << MBB->getNumber();
+        errs() << " " << MBB->getName() << " \n";
+      }
 
       // Schedule a region: possibly reorder instructions.
       // This invalidates 'RegionEnd' and 'I'.
@@ -2460,7 +2468,10 @@
     if ((*I)->getDepth() > Rem.CriticalPath)
       Rem.CriticalPath = (*I)->getDepth();
   }
-  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+  DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
+  if (DumpCriticalPathLength) {
+    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
+  }
 
   if (EnableCyclicPath) {
     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
@@ -2902,7 +2913,10 @@
     if ((*I)->getDepth() > Rem.CriticalPath)
       Rem.CriticalPath = (*I)->getDepth();
   }
-  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+  DEBUG(dbgs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << '\n');
+  if (DumpCriticalPathLength) {
+    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
+  }
 }
 
 /// Apply a set of heursitics to a new candidate for PostRA scheduling.
Index: lib/CodeGen/MachineTraceMetrics.cpp
===================================================================
--- lib/CodeGen/MachineTraceMetrics.cpp
+++ lib/CodeGen/MachineTraceMetrics.cpp
@@ -1169,6 +1169,7 @@
   return DepCycle;
 }
 
+/// When Bottom is set, include instructions in the current block in the estimate.
 unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const {
   // Find the limiting processor resource.
   // Numbers have been pre-scaled to be comparable.
@@ -1185,7 +1186,9 @@
   // Convert to cycle count.
   PRMax = TE.MTM.getCycles(PRMax);
+  // All instructions before the current block,
   unsigned Instrs = TBI.InstrDepth;
+  // plus the instructions in the current block.
   if (Bottom)
     Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount;
   if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
     Instrs /= IW;
   // Assume issue width 1 without a schedule model.
   return std::max(Instrs, PRMax);
 }
 
-unsigned MachineTraceMetrics::Trace::
-getResourceLength(ArrayRef<const MachineBasicBlock *> Extrablocks,
-                  ArrayRef<const MCSchedClassDesc *> ExtraInstrs) const {
+unsigned MachineTraceMetrics::Trace::getResourceLength(
+    ArrayRef<const MachineBasicBlock *> Extrablocks,
+    ArrayRef<const MCSchedClassDesc *> ExtraInstrs,
+    ArrayRef<const MCSchedClassDesc *> RemoveInstrs) const {
   // Add up resources above and below the center block.
   ArrayRef<unsigned> PRDepths = TE.getProcResourceDepths(getBlockNum());
   ArrayRef<unsigned> PRHeights = TE.getProcResourceHeights(getBlockNum());
   unsigned PRMax = 0;
-  for (unsigned K = 0; K != PRDepths.size(); ++K) {
-    unsigned PRCycles = PRDepths[K] + PRHeights[K];
-    for (unsigned I = 0; I != Extrablocks.size(); ++I)
-      PRCycles += TE.MTM.getProcResourceCycles(Extrablocks[I]->getNumber())[K];
-    for (unsigned I = 0; I != ExtraInstrs.size(); ++I) {
-      const MCSchedClassDesc* SC = ExtraInstrs[I];
+
+  // Compute the cycles that a set of instructions contributes to the given
+  // processor resource.
+  auto extraCycles = [this](ArrayRef<const MCSchedClassDesc *> Instrs,
+                            unsigned ResourceIdx) -> unsigned {
+    unsigned Cycles = 0;
+    for (unsigned I = 0; I != Instrs.size(); ++I) {
+      const MCSchedClassDesc *SC = Instrs[I];
       if (!SC->isValid())
         continue;
       for (TargetSchedModel::ProcResIter
-           PI = TE.MTM.SchedModel.getWriteProcResBegin(SC),
-           PE = TE.MTM.SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
-        if (PI->ProcResourceIdx != K)
+               PI = TE.MTM.SchedModel.getWriteProcResBegin(SC),
+               PE = TE.MTM.SchedModel.getWriteProcResEnd(SC);
+           PI != PE; ++PI) {
+        if (PI->ProcResourceIdx != ResourceIdx)
           continue;
-        PRCycles += (PI->Cycles * TE.MTM.SchedModel.getResourceFactor(K));
+        Cycles +=
+            (PI->Cycles * TE.MTM.SchedModel.getResourceFactor(ResourceIdx));
       }
     }
+    return Cycles;
+  };
+
+  for (unsigned K = 0; K != PRDepths.size(); ++K) {
+    unsigned PRCycles = PRDepths[K] + PRHeights[K];
+    for (unsigned I = 0; I != Extrablocks.size(); ++I)
+      PRCycles += TE.MTM.getProcResourceCycles(Extrablocks[I]->getNumber())[K];
+    PRCycles += extraCycles(ExtraInstrs, K);
+    PRCycles -= extraCycles(RemoveInstrs, K);
     PRMax = std::max(PRMax, PRCycles);
   }
   // Convert to cycle count.
   PRMax = TE.MTM.getCycles(PRMax);
 
+  // Instrs: #instructions in the current trace outside the current block.
   unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight;
+  // Add the instruction count from the extra blocks.
   for (unsigned i = 0, e = Extrablocks.size(); i != e; ++i)
     Instrs += TE.MTM.getResources(Extrablocks[i])->InstrCount;
+  Instrs += ExtraInstrs.size();
+  Instrs -= RemoveInstrs.size();
   if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
     Instrs /= IW;
   // Assume issue width 1 without a schedule model.
   return std::max(Instrs, PRMax);
 }
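The two bounds combined at the end of getResourceLength() are worth isolating.
A minimal sketch (illustrative only; the real code derives both inputs from the
trace plus the extra and removed instruction lists):

    // A trace can finish no earlier than its instruction count divided by the
    // issue width, and no earlier than its busiest processor resource.
    static unsigned resourceLengthBound(unsigned Instrs, unsigned IssueWidth,
                                        unsigned BusiestResourceCycles) {
      if (IssueWidth)
        Instrs /= IssueWidth;
      return std::max(Instrs, BusiestResourceCycles);
    }

The new RemoveInstrs argument lowers both inputs, which is how the combiner
gets credit for the MUL and ADD/SUB it deletes.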
+bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr *DefMI,
+                                              const MachineInstr *UseMI) const {
+  if (DefMI->getParent() == UseMI->getParent())
+    return true;
+
+  const TraceBlockInfo &DepTBI = TE.BlockInfo[DefMI->getParent()->getNumber()];
+  const TraceBlockInfo &TBI = TE.BlockInfo[UseMI->getParent()->getNumber()];
+
+  return DepTBI.isUsefulDominator(TBI);
+}
+
 void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const {
   OS << getName() << " ensemble:\n";
   for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
Index: lib/CodeGen/TargetSchedule.cpp
===================================================================
--- lib/CodeGen/TargetSchedule.cpp
+++ lib/CodeGen/TargetSchedule.cpp
@@ -225,6 +225,28 @@
   return DefMI->isTransient() ? 0 : TII->defaultDefLatency(&SchedModel, DefMI);
 }
 
+unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
+  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
+
+  unsigned SCIdx = TII->get(Opcode).getSchedClass();
+  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SCIdx);
+  unsigned Latency = 0;
+
+  if (SCDesc->isValid() && !SCDesc->isVariant()) {
+    for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
+         DefIdx != DefEnd; ++DefIdx) {
+      // Lookup the definition's write latency in SubtargetInfo.
+      const MCWriteLatencyEntry *WLEntry =
+          STI->getWriteLatencyEntry(SCDesc, DefIdx);
+      Latency = std::max(Latency, capLatency(WLEntry->Cycles));
+    }
+    return Latency;
+  }
+
+  assert(Latency && "No MI sched latency");
+  return 0;
+}
+
 unsigned
 TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                       bool UseDefaultDefLatency) const {
Index: lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- lib/Target/AArch64/AArch64InstrFormats.td
+++ lib/Target/AArch64/AArch64InstrFormats.td
@@ -1351,14 +1351,15 @@
 }
 
 multiclass MulAccum<bit isSub, string asm, SDNode AccNode> {
+  // MADD/MSUB generation is decided by MachineCombiner.cpp
   def Wrrr : BaseMulAccum<isSub, 0b000, GPR32, GPR32, asm,
-      [(set GPR32:$Rd, (AccNode GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm)))]>,
+      [/*(set GPR32:$Rd, (AccNode GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm)))*/]>,
       Sched<[WriteIM32, ReadIM, ReadIM, ReadIMA]> {
     let Inst{31} = 0;
   }
 
   def Xrrr : BaseMulAccum<isSub, 0b000, GPR64, GPR64, asm,
-      [(set GPR64:$Rd, (AccNode GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm)))]>,
+      [/*(set GPR64:$Rd, (AccNode GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm)))*/]>,
       Sched<[WriteIM64, ReadIM, ReadIM, ReadIMA]> {
     let Inst{31} = 1;
   }
Index: lib/Target/AArch64/AArch64InstrInfo.h
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.h
+++ lib/Target/AArch64/AArch64InstrInfo.h
@@ -17,6 +17,7 @@
 #include "AArch64.h"
 #include "AArch64RegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/CodeGen/MachineCombinerPattern.h"
 
 #define GET_INSTRINFO_HEADER
 #include "AArch64GenInstrInfo.inc"
@@ -153,6 +154,24 @@
   bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                             unsigned SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
+  /// hasPattern - return true when there is potentially a faster code sequence
+  /// for an instruction chain ending in \p Root. All potential patterns are
+  /// listed in the \p Pattern array.
+  virtual bool hasPattern(
+      MachineInstr &Root,
+      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const;
+
+  /// genAlternativeCodeSequence - when hasPattern() finds a pattern
+  /// this function generates the instructions that could replace the
+  /// original code sequence
+  virtual void genAlternativeCodeSequence(
+      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
+      SmallVectorImpl<MachineInstr *> &InsInstrs,
+      SmallVectorImpl<MachineInstr *> &DelInstrs,
+      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+  /// useMachineCombiner - AArch64 supports MachineCombiner
+  virtual bool useMachineCombiner(void) const;
 
 private:
   void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -14,6 +14,7 @@
 #include "AArch64InstrInfo.h"
 #include "AArch64Subtarget.h"
 #include "MCTargetDesc/AArch64AddressingModes.h"
+#include "AArch64MachineCombinerPattern.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
@@ -652,17 +653,12 @@
   return true;
 }
 
-/// optimizeCompareInstr - Convert the instruction supplying the argument to
-/// the comparison into one that sets the zero bit in the flags register.
-bool AArch64InstrInfo::optimizeCompareInstr(
-    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
-    int CmpValue, const MachineRegisterInfo *MRI) const {
-
-  // Replace SUBSWrr with SUBWrr if NZCV is not used.
-  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
-  if (Cmp_NZCV != -1) {
+/// convertFlagSettingOpcode - return an opcode that does not set flags when
+/// possible. The caller is responsible for doing the actual substitution and
+/// legality checking.
+static unsigned convertFlagSettingOpcode(MachineInstr *MI) {
     unsigned NewOpc;
-    switch (CmpInstr->getOpcode()) {
+    switch (MI->getOpcode()) {
     default:
      return false;
    case AArch64::ADDSWrr:      NewOpc = AArch64::ADDWrr; break;
@@ -682,7 +678,22 @@
    case AArch64::SUBSXrs:      NewOpc = AArch64::SUBXrs; break;
    case AArch64::SUBSXrx:      NewOpc = AArch64::SUBXrx; break;
    }
+    return NewOpc;
+}
+
+/// optimizeCompareInstr - Convert the instruction supplying the argument to
+/// the comparison into one that sets the zero bit in the flags register.
+bool AArch64InstrInfo::optimizeCompareInstr(
+    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
+    int CmpValue, const MachineRegisterInfo *MRI) const {
+  // Replace SUBSWrr with SUBWrr if NZCV is not used.
+  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
+  if (Cmp_NZCV != -1) {
+    unsigned Opc = CmpInstr->getOpcode();
+    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
+    if (NewOpc == Opc)
+      return false;
     const MCInstrDesc &MCID = get(NewOpc);
     CmpInstr->setDesc(MCID);
     CmpInstr->RemoveOperand(Cmp_NZCV);
@@ -2087,3 +2098,448 @@
   NopInst.setOpcode(AArch64::HINT);
   NopInst.addOperand(MCOperand::CreateImm(0));
 }
+
+/// useMachineCombiner - return true when a target supports MachineCombiner
+bool AArch64InstrInfo::useMachineCombiner(void) const {
+  // AArch64 supports the combiner.
+  return true;
+}
+
+// True when Opc sets flags.
+static bool isCombineInstrSettingFlag(unsigned Opc) {
+  switch (Opc) {
+  case AArch64::ADDSWrr:
+  case AArch64::ADDSWri:
+  case AArch64::ADDSXrr:
+  case AArch64::ADDSXri:
+  case AArch64::SUBSWrr:
+  case AArch64::SUBSXrr:
+  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd = WnxWm - Wi.
+  case AArch64::SUBSWri:
+  case AArch64::SUBSXri:
+    return true;
+  default:
+    break;
+  }
+  return false;
+}
+
+// 32b Opcodes that can be combined with a MUL
+static bool isCombineInstrCandidate32(unsigned Opc) {
+  switch (Opc) {
+  case AArch64::ADDWrr:
+  case AArch64::ADDWri:
+  case AArch64::SUBWrr:
+  case AArch64::ADDSWrr:
+  case AArch64::ADDSWri:
+  case AArch64::SUBSWrr:
+  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd = WnxWm - Wi.
+  case AArch64::SUBWri:
+  case AArch64::SUBSWri:
+    return true;
+  default:
+    break;
+  }
+  return false;
+}
+
+// 64b Opcodes that can be combined with a MUL
+static bool isCombineInstrCandidate64(unsigned Opc) {
+  switch (Opc) {
+  case AArch64::ADDXrr:
+  case AArch64::ADDXri:
+  case AArch64::SUBXrr:
+  case AArch64::ADDSXrr:
+  case AArch64::ADDSXri:
+  case AArch64::SUBSXrr:
+  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd = WnxWm - Wi.
+  case AArch64::SUBXri:
+  case AArch64::SUBSXri:
+    return true;
+  default:
+    break;
+  }
+  return false;
+}
+
+// Opcodes that can be combined with a MUL
+static bool isCombineInstrCandidate(unsigned Opc) {
+  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
+}
+
+static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
+                              unsigned MulOpc, unsigned ZeroReg) {
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  MachineInstr *MI = nullptr;
+  // We need a virtual register definition.
+  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+    MI = MRI.getUniqueVRegDef(MO.getReg());
+  // And it needs to be in the trace (otherwise, it won't have a depth).
+  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
+    return false;
+
+  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
+         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
+         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
+
+  // The third input reg must be zero.
+  if (MI->getOperand(3).getReg() != ZeroReg)
+    return false;
+
+  // It must only be used by the instruction we combine with.
+  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
+    return false;
+
+  return true;
+}
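Concretely, the shape this guard accepts looks like the following (schematic
virtual-register MIR, illustrative only):

    %3 = MADDWrrr %0, %1, %wzr   ; the "MUL": multiply-accumulate onto WZR
    %4 = ADDWrr %3, %2           ; Root; %3 is defined in the same block
                                 ; and has exactly this one non-debug use

The one-use requirement matters: if %3 had a second user, the multiply would
have to stay live anyway, and folding it into a MADD could not shorten the
dependence chain.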
+/// hasPattern - return true when there is potentially a faster code sequence
+/// for an instruction chain ending in \p Root. All potential patterns are
+/// listed in the \p Pattern vector. Patterns should be sorted in priority
+/// order since the pattern evaluator stops checking as soon as it finds a
+/// faster sequence.
+bool AArch64InstrInfo::hasPattern(
+    MachineInstr &Root,
+    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
+  unsigned Opc = Root.getOpcode();
+  MachineBasicBlock &MBB = *Root.getParent();
+  bool Found = false;
+
+  if (!isCombineInstrCandidate(Opc))
+    return false;
+  if (isCombineInstrSettingFlag(Opc)) {
+    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
+    // When NZCV is live bail out.
+    if (Cmp_NZCV == -1)
+      return false;
+    unsigned NewOpc = convertFlagSettingOpcode(&Root);
+    // When the opcode can't change bail out.
+    // CHECKME: do we miss any cases for opcode conversion?
+    if (NewOpc == Opc)
+      return false;
+    Opc = NewOpc;
+  }
+
+  switch (Opc) {
+  default:
+    break;
+  case AArch64::ADDWrr:
+    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
+           "ADDWrr does not have register operands");
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
+                          AArch64::WZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
+      Found = true;
+    }
+    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
+                          AArch64::WZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
+      Found = true;
+    }
+    break;
+  case AArch64::ADDXrr:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
+                          AArch64::XZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
+      Found = true;
+    }
+    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
+                          AArch64::XZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
+      Found = true;
+    }
+    break;
+  case AArch64::SUBWrr:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
+                          AArch64::WZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
+      Found = true;
+    }
+    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
+                          AArch64::WZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
+      Found = true;
+    }
+    break;
+  case AArch64::SUBXrr:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
+                          AArch64::XZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
+      Found = true;
+    }
+    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
+                          AArch64::XZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
+      Found = true;
+    }
+    break;
+  case AArch64::ADDWri:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
+                          AArch64::WZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
+      Found = true;
+    }
+    break;
+  case AArch64::ADDXri:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
+                          AArch64::XZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
+      Found = true;
+    }
+    break;
+  case AArch64::SUBWri:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
+                          AArch64::WZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
+      Found = true;
+    }
+    break;
+  case AArch64::SUBXri:
+    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
+                          AArch64::XZR)) {
+      Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
+      Found = true;
+    }
+    break;
+  }
+  return Found;
+}
+/// genMadd - Generate a madd instruction and combine mul and add.
+/// Example:
+///  MUL I=A,B,0
+///  ADD R,I,C
+///  ==> MADD R,A,B,C
+/// \param Root is the ADD instruction
+/// \param [out] InsInstrs is a vector of machine instructions and will
+/// contain the generated madd instruction
+/// \param IdxMulOpd is the index of the operand in Root that is the result of
+/// the MUL. In the example above IdxMulOpd is 1.
+/// \param MaddOpc the opcode of the madd instruction
+static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
+                             const TargetInstrInfo *TII, MachineInstr &Root,
+                             SmallVectorImpl<MachineInstr *> &InsInstrs,
+                             unsigned IdxMulOpd, unsigned MaddOpc) {
+  assert(IdxMulOpd == 1 || IdxMulOpd == 2);
+
+  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
+  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
+  MachineOperand R = Root.getOperand(0);
+  MachineOperand A = MUL->getOperand(1);
+  MachineOperand B = MUL->getOperand(2);
+  MachineOperand C = Root.getOperand(IdxOtherOpd);
+  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc))
+                                .addOperand(R)
+                                .addOperand(A)
+                                .addOperand(B)
+                                .addOperand(C);
+  // Insert the MADD
+  InsInstrs.push_back(MIB);
+  return MUL;
+}
+
+/// genMaddR - Generate a madd instruction and combine mul and add using
+/// an extra virtual register
+/// Example - an ADD intermediate needs to be stored in a register:
+///  MUL I=A,B,0
+///  ADD R,I,Imm
+///  ==> ORR  V, ZR, Imm
+///  ==> MADD R,A,B,V
+/// \param Root is the ADD instruction
+/// \param [out] InsInstrs is a vector of machine instructions and will
+/// contain the generated madd instruction
+/// \param IdxMulOpd is the index of the operand in Root that is the result of
+/// the MUL. In the example above IdxMulOpd is 1.
+/// \param MaddOpc the opcode of the madd instruction
+/// \param VR is a virtual register that holds the value of an ADD operand
+/// (V in the example above).
+static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
+                              const TargetInstrInfo *TII, MachineInstr &Root,
+                              SmallVectorImpl<MachineInstr *> &InsInstrs,
+                              unsigned IdxMulOpd, unsigned MaddOpc,
+                              unsigned VR) {
+  assert(IdxMulOpd == 1 || IdxMulOpd == 2);
+
+  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
+  MachineOperand R = Root.getOperand(0);
+  MachineOperand A = MUL->getOperand(1);
+  MachineOperand B = MUL->getOperand(2);
+  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc))
+                                .addOperand(R)
+                                .addOperand(A)
+                                .addOperand(B)
+                                .addReg(VR);
+  // Insert the MADD
+  InsInstrs.push_back(MIB);
+  return MUL;
+}
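To pin down the index convention shared by both generators (schematic,
illustrative only): for Root = ADDWrr %4, %3, %2 where the MUL defines %3,
IdxMulOpd is 1 and IdxOtherOpd is 2, so

    ; before                        ; after genMadd(..., IdxMulOpd=1, MADDWrrr)
    %3 = MADDWrrr %0, %1, %wzr      %4 = MADDWrrr %0, %1, %2
    %4 = ADDWrr %3, %2

with A = %0, B = %1, C = %2 and R = %4; the MADD computes A*B + C.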
+/// genAlternativeCodeSequence - when hasPattern() finds a pattern
+/// this function generates the instructions that could replace the
+/// original code sequence
+void AArch64InstrInfo::genAlternativeCodeSequence(
+    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+    SmallVectorImpl<MachineInstr *> &InsInstrs,
+    SmallVectorImpl<MachineInstr *> &DelInstrs,
+    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
+  MachineBasicBlock &MBB = *Root.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  MachineFunction &MF = *MBB.getParent();
+  const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+
+  MachineInstr *MUL;
+  unsigned Opc;
+  switch (Pattern) {
+  default:
+    // signal error.
+    break;
+  case MachineCombinerPattern::MC_MULADDW_OP1:
+  case MachineCombinerPattern::MC_MULADDX_OP1:
+    // MUL I=A,B,0
+    // ADD R,I,C
+    // ==> MADD R,A,B,C
+    // --- Create(MADD);
+    Opc = Pattern == MachineCombinerPattern::MC_MULADDW_OP1 ? AArch64::MADDWrrr
+                                                            : AArch64::MADDXrrr;
+    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc);
+    break;
+  case MachineCombinerPattern::MC_MULADDW_OP2:
+  case MachineCombinerPattern::MC_MULADDX_OP2:
+    // MUL I=A,B,0
+    // ADD R,C,I
+    // ==> MADD R,A,B,C
+    // --- Create(MADD);
+    Opc = Pattern == MachineCombinerPattern::MC_MULADDW_OP2 ? AArch64::MADDWrrr
+                                                            : AArch64::MADDXrrr;
+    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc);
+    break;
+  case MachineCombinerPattern::MC_MULADDWI_OP1:
+  case MachineCombinerPattern::MC_MULADDXI_OP1:
+    // MUL I=A,B,0
+    // ADD R,I,Imm
+    // ==> ORR  V, ZR, Imm
+    // ==> MADD R,A,B,V
+    // --- Create(MADD);
+    {
+      const TargetRegisterClass *RC =
+          MRI.getRegClass(Root.getOperand(1).getReg());
+      unsigned NewVR = MRI.createVirtualRegister(RC);
+      unsigned BitSize, OrrOpc, ZeroReg;
+      if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
+        BitSize = 32;
+        OrrOpc = AArch64::ORRWri;
+        ZeroReg = AArch64::WZR;
+        Opc = AArch64::MADDWrrr;
+      } else {
+        OrrOpc = AArch64::ORRXri;
+        BitSize = 64;
+        ZeroReg = AArch64::XZR;
+        Opc = AArch64::MADDXrrr;
+      }
+      uint64_t Imm = Root.getOperand(2).getImm();
+
+      if (Root.getOperand(3).isImm()) {
+        unsigned Val = Root.getOperand(3).getImm();
+        Imm = Imm << Val;
+      }
+      // Keep only the low BitSize bits of the (shifted) immediate.
+      uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
+      uint64_t Encoding;
+
+      if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
+        MachineInstrBuilder MIB1 =
+            BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc))
+                .addOperand(MachineOperand::CreateReg(NewVR, RegState::Define))
+                .addReg(ZeroReg)
+                .addImm(Encoding);
+        InsInstrs.push_back(MIB1);
+        InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
+        MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR);
+      }
+    }
+    break;
+  case MachineCombinerPattern::MC_MULSUBW_OP1:
+  case MachineCombinerPattern::MC_MULSUBX_OP1: {
+    // MUL I=A,B,0
+    // SUB R,I, C
+    // ==> SUB  V, 0, C
+    // ==> MADD R,A,B,V // = -C + A*B
+    // --- Create(MADD);
+    const TargetRegisterClass *RC =
+        MRI.getRegClass(Root.getOperand(1).getReg());
+    unsigned NewVR = MRI.createVirtualRegister(RC);
+    unsigned SubOpc, ZeroReg;
+    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
+      SubOpc = AArch64::SUBWrr;
+      ZeroReg = AArch64::WZR;
+      Opc = AArch64::MADDWrrr;
+    } else {
+      SubOpc = AArch64::SUBXrr;
+      ZeroReg = AArch64::XZR;
+      Opc = AArch64::MADDXrrr;
+    }
+    // SUB NewVR, 0, C
+    MachineInstrBuilder MIB1 =
+        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc))
+            .addOperand(MachineOperand::CreateReg(NewVR, RegState::Define))
+            .addReg(ZeroReg)
+            .addOperand(Root.getOperand(2));
+    InsInstrs.push_back(MIB1);
+    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
+    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR);
+  } break;
+  case MachineCombinerPattern::MC_MULSUBW_OP2:
+  case MachineCombinerPattern::MC_MULSUBX_OP2:
+    // MUL I=A,B,0
+    // SUB R,C,I
+    // ==> MSUB R,A,B,C (computes C - A*B)
+    // --- Create(MSUB);
+    Opc = Pattern == MachineCombinerPattern::MC_MULSUBW_OP2 ? AArch64::MSUBWrrr
+                                                            : AArch64::MSUBXrrr;
+    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc);
+    break;
+  case MachineCombinerPattern::MC_MULSUBWI_OP1:
+  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
+    // MUL I=A,B,0
+    // SUB R,I, Imm
+    // ==> ORR  V, ZR, -Imm
+    // ==> MADD R,A,B,V // = -Imm + A*B
+    // --- Create(MADD);
+    const TargetRegisterClass *RC =
+        MRI.getRegClass(Root.getOperand(1).getReg());
+    unsigned NewVR = MRI.createVirtualRegister(RC);
+    unsigned BitSize, OrrOpc, ZeroReg;
+    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
+      BitSize = 32;
+      OrrOpc = AArch64::ORRWri;
+      ZeroReg = AArch64::WZR;
+      Opc = AArch64::MADDWrrr;
+    } else {
+      OrrOpc = AArch64::ORRXri;
+      BitSize = 64;
+      ZeroReg = AArch64::XZR;
+      Opc = AArch64::MADDXrrr;
+    }
+    int Imm = Root.getOperand(2).getImm();
+    if (Root.getOperand(3).isImm()) {
+      unsigned Val = Root.getOperand(3).getImm();
+      Imm = Imm << Val;
+    }
+    // Negate first, then keep only the low BitSize bits; the cast avoids
+    // shifting a (possibly negative) int.
+    uint64_t UImm = uint64_t(-Imm) << (64 - BitSize) >> (64 - BitSize);
+    uint64_t Encoding;
+    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
+      MachineInstrBuilder MIB1 =
+          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc))
+              .addOperand(MachineOperand::CreateReg(NewVR, RegState::Define))
+              .addReg(ZeroReg)
+              .addImm(Encoding);
+      InsInstrs.push_back(MIB1);
+      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
+      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR);
+    }
+  } break;
+  }
+  // Record MUL and ADD/SUB for deletion.
+  DelInstrs.push_back(MUL);
+  DelInstrs.push_back(&Root);
+
+  return;
+}
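Both immediate patterns rely on the same masking idiom before querying the
logical-immediate encoder. Isolated for clarity (illustrative helper, not part
of the patch):

    // Keep only the low BitSize bits of Imm; the double shift zeroes the high
    // 64 - BitSize bits, e.g. lowBits(-1, 32) == 0xFFFFFFFF.
    static uint64_t lowBits(uint64_t Imm, unsigned BitSize) {
      return Imm << (64 - BitSize) >> (64 - BitSize);
    }

If the masked value has no valid AArch64 logical-immediate encoding, no
instructions are emitted and the combiner skips the pattern; this is exactly
the empty-InsInstrs case handled in combineInstructions().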
Index: lib/Target/AArch64/AArch64MachineCombinerPattern.h
===================================================================
--- /dev/null
+++ lib/Target/AArch64/AArch64MachineCombinerPattern.h
@@ -0,0 +1,42 @@
+//===- AArch64MachineCombinerPattern.h - AArch64 instruction patterns
+// supported by the machine combiner ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the instruction patterns supported by the machine
+// combiner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_AArch64MACHINECOMBINERPATTERN_H
+#define LLVM_TARGET_AArch64MACHINECOMBINERPATTERN_H
+
+namespace llvm {
+
+/// Enumeration of the instruction patterns supported by the machine combiner.
+namespace MachineCombinerPattern {
+enum MC_PATTERN : int {
+  MC_NONE = 0,
+  MC_MULADDW_OP1 = 1,
+  MC_MULADDW_OP2 = 2,
+  MC_MULSUBW_OP1 = 3,
+  MC_MULSUBW_OP2 = 4,
+  MC_MULADDWI_OP1 = 5,
+  MC_MULSUBWI_OP1 = 6,
+  MC_MULADDX_OP1 = 7,
+  MC_MULADDX_OP2 = 8,
+  MC_MULSUBX_OP1 = 9,
+  MC_MULSUBX_OP2 = 10,
+  MC_MULADDXI_OP1 = 11,
+  MC_MULSUBXI_OP1 = 12
+};
+} // end namespace MachineCombinerPattern
+} // end namespace llvm
+
+#endif
Index: lib/Target/AArch64/AArch64TargetMachine.cpp
===================================================================
--- lib/Target/AArch64/AArch64TargetMachine.cpp
+++ lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -24,6 +24,10 @@
 EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
            cl::init(true), cl::Hidden);
 
+static cl::opt<bool> EnableMCR("aarch64-mcr",
+                               cl::desc("Enable the machine combiner pass"),
+                               cl::init(true), cl::Hidden);
+
 static cl::opt<bool>
 EnableStPairSuppress("aarch64-stp-suppress",
                      cl::desc("Suppress STP for AArch64"),
                      cl::init(true), cl::Hidden);
@@ -176,6 +180,8 @@
 bool AArch64PassConfig::addILPOpts() {
   if (EnableCCMP)
     addPass(createAArch64ConditionalCompares());
+  if (EnableMCR)
+    addPass(&MachineCombinerID);
   addPass(&EarlyIfConverterID);
   if (EnableStPairSuppress)
     addPass(createAArch64StorePairSuppressPass());
Index: test/CodeGen/AArch64/arm64-neon-mul-div.ll
===================================================================
--- test/CodeGen/AArch64/arm64-neon-mul-div.ll
+++ test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -mcpu=cyclone | FileCheck %s
 
 ; arm64 has its own copy of this because of the intrinsics
 define <8 x i8> @mul8xi8(<8 x i8> %A, <8 x i8> %B) {
@@ -450,8 +450,8 @@
 define <2 x i32> @srem2x32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: srem2x32:
 ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 ; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 ; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 	%tmp3 = srem <2 x i32> %A, %B;
 	ret <2 x i32> %tmp3
@@ -482,8 +482,8 @@
 define <2 x i64> @srem2x64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK-LABEL: srem2x64:
 ; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
 ; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
 ; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
 	%tmp3 = srem <2 x i64> %A, %B;
 	ret <2 x i64> %tmp3
@@ -612,8 +612,8 @@
 define <2 x i32> @urem2x32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: urem2x32:
 ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 ; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 ; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 	%tmp3 = urem <2 x i32> %A, %B;
 	ret <2 x i32> %tmp3
@@ -644,8 +644,8 @@
 define <2 x i64> @urem2x64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK-LABEL: urem2x64:
 ; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
 ; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
+; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
 ; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
 	%tmp3 = urem <2 x i64> %A, %B;
 	ret <2 x i64> %tmp3
Index: test/CodeGen/AArch64/dp-3source.ll
===================================================================
--- test/CodeGen/AArch64/dp-3source.ll
+++ test/CodeGen/AArch64/dp-3source.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -mcpu=cyclone | FileCheck %s
 
 define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) {
 ; CHECK-LABEL: test_madd32:
Index: test/CodeGen/AArch64/madd-lohi.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/madd-lohi.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm64_be-linux-gnu %s -o - | FileCheck --check-prefix=CHECK-BE %s
+
+define i128 @test_128bitmul(i128 %lhs, i128 %rhs) {
+; CHECK-LABEL: test_128bitmul:
+; CHECK-DAG: umulh [[CARRY:x[0-9]+]], x0, x2
+; CHECK-DAG: madd [[PART1:x[0-9]+]], x0, x3, [[CARRY]]
+; CHECK: madd x1, x1, x2, [[PART1]]
+; CHECK: mul x0, x0, x2
+
+; CHECK-BE-LABEL: test_128bitmul:
+; CHECK-BE-DAG: umulh [[CARRY:x[0-9]+]], x1, x3
+; CHECK-BE-DAG: madd [[PART1:x[0-9]+]], x1, x2, [[CARRY]]
+; CHECK-BE: madd x0, x0, x3, [[PART1]]
+; CHECK-BE: mul x1, x1, x3
+
+  %prod = mul i128 %lhs, %rhs
+  ret i128 %prod
+}
Index: test/CodeGen/AArch64/mul-lohi.ll
===================================================================
--- test/CodeGen/AArch64/mul-lohi.ll
+++ test/CodeGen/AArch64/mul-lohi.ll
@@ -1,17 +1,17 @@
-; RUN: llc -mtriple=arm64-apple-ios7.0 %s -o - | FileCheck %s
-; RUN: llc -mtriple=arm64_be-linux-gnu %s -o - | FileCheck --check-prefix=CHECK-BE %s
+; RUN: llc -mtriple=arm64-apple-ios7.0 -mcpu=cyclone %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm64_be-linux-gnu -mcpu=cyclone %s -o - | FileCheck --check-prefix=CHECK-BE %s
 
 define i128 @test_128bitmul(i128 %lhs, i128 %rhs) {
 ; CHECK-LABEL: test_128bitmul:
+; CHECK-DAG: mul [[PART1:x[0-9]+]], x0, x3
 ; CHECK-DAG: umulh [[CARRY:x[0-9]+]], x0, x2
-; CHECK-DAG: madd [[PART1:x[0-9]+]], x0, x3, [[CARRY]]
-; CHECK: madd x1, x1, x2, [[PART1]]
+; CHECK: mul [[PART2:x[0-9]+]], x1, x2
 ; CHECK: mul x0, x0, x2
 
 ; CHECK-BE-LABEL: test_128bitmul:
+; CHECK-BE-DAG: mul [[PART1:x[0-9]+]], x1, x2
 ; CHECK-BE-DAG: umulh [[CARRY:x[0-9]+]], x1, x3
-; CHECK-BE-DAG: madd [[PART1:x[0-9]+]], x1, x2, [[CARRY]]
-; CHECK-BE: madd x0, x0, x3, [[PART1]]
+; CHECK-BE: mul [[PART2:x[0-9]+]], x0, x3
 ; CHECK-BE: mul x1, x1, x3
 
   %prod = mul i128 %lhs, %rhs