Index: llvm/trunk/include/llvm/CodeGen/MachineInstr.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineInstr.h
+++ llvm/trunk/include/llvm/CodeGen/MachineInstr.h
@@ -1108,6 +1108,18 @@
   /// the instruction's location and its intended destination.
   bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
 
+  /// Returns true if this instruction's memory access aliases the memory
+  /// access of Other.
+  ///
+  /// Assumes any physical registers used to compute addresses
+  /// have the same value for both instructions. Returns false if neither
+  /// instruction writes to memory.
+  ///
+  /// @param AA Optional alias analysis, used to compare memory operands.
+  /// @param Other MachineInstr to check aliasing against.
+  /// @param UseTBAA Whether to pass TBAA information to alias analysis.
+  bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
+
   /// Return true if this instruction may have an ordered
   /// or volatile memory reference, or if the information describing the memory
   /// reference is not available. Return false if it is known to have no
Index: llvm/trunk/include/llvm/Target/TargetInstrInfo.h
===================================================================
--- llvm/trunk/include/llvm/Target/TargetInstrInfo.h
+++ llvm/trunk/include/llvm/Target/TargetInstrInfo.h
@@ -1437,10 +1437,17 @@
     return nullptr;
   }
 
-  // Sometimes, it is possible for the target
-  // to tell, even without aliasing information, that two MIs access different
-  // memory addresses. This function returns true if two MIs access different
-  // memory addresses and false otherwise.
+  /// Sometimes, it is possible for the target
+  /// to tell, even without aliasing information, that two MIs access different
+  /// memory addresses. This function returns true if two MIs access different
+  /// memory addresses and false otherwise.
+  ///
+  /// Assumes any physical registers used to compute addresses have the same
+  /// value for both instructions. (This is the most useful assumption for
+  /// post-RA scheduling.)
+  ///
+  /// See also MachineInstr::mayAlias, which is implemented on top of this
+  /// function.
   virtual bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
                                                MachineInstr &MIb,
                                                AliasAnalysis *AA = nullptr) const {
Index: llvm/trunk/lib/CodeGen/MachineInstr.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineInstr.cpp
+++ llvm/trunk/lib/CodeGen/MachineInstr.cpp
@@ -1571,6 +1571,65 @@
   return true;
 }
 
+bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
+                            bool UseTBAA) {
+  const MachineFunction *MF = getParent()->getParent();
+  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+
+  // If neither instruction stores to memory, they can't alias in any
+  // meaningful way, even if they read from the same address.
+  if (!mayStore() && !Other.mayStore())
+    return false;
+
+  // Let the target decide if memory accesses cannot possibly overlap.
+  if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
+    return false;
+
+  if (!AA)
+    return true;
+
+  // FIXME: Need to handle multiple memory operands to support all targets.
+  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
+    return true;
+
+  MachineMemOperand *MMOa = *memoperands_begin();
+  MachineMemOperand *MMOb = *Other.memoperands_begin();
+
+  if (!MMOa->getValue() || !MMOb->getValue())
+    return true;
+
+  // The following interface to AA is fashioned after DAGCombiner::isAlias
+  // and operates with MachineMemOperand offset with some important
+  // assumptions:
+  //   - LLVM fundamentally assumes flat address spaces.
+  //   - MachineOperand offset can *only* result from legalization and
+  //     cannot affect queries other than the trivial case of overlap
+  //     checking.
+  //   - These offsets never wrap and never step outside
+  //     of allocated objects.
+  //   - There should never be any negative offsets here.
+  //
+  // FIXME: Modify API to hide this math from "user"
+  // FIXME: Even before we go to AA we can reason locally about some
+  // memory objects. It can save compile time, and possibly catch some
+  // corner cases not currently covered.
+
+  assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
+  assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");
+
+  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
+  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
+  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
+
+  AliasResult AAResult =
+      AA->alias(MemoryLocation(MMOa->getValue(), Overlapa,
+                               UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+                MemoryLocation(MMOb->getValue(), Overlapb,
+                               UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
+
+  return (AAResult != NoAlias);
+}
+
 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
 /// or volatile memory reference, or if the information describing the memory
 /// reference is not available. Return false if it is known to have no ordered
Index: llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -537,69 +537,9 @@
          (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad(AA));
 }
 
-/// Returns true if the two MIs need a chain edge between them.
-/// This is called on normal stores and loads.
-static bool MIsNeedChainEdge(AliasAnalysis *AA, MachineInstr *MIa,
-                             MachineInstr *MIb) {
-  const MachineFunction *MF = MIa->getParent()->getParent();
-  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
-
-  assert ((MIa->mayStore() || MIb->mayStore()) &&
-          "Dependency checked between two loads");
-
-  // Let the target decide if memory accesses cannot possibly overlap.
-  if (TII->areMemAccessesTriviallyDisjoint(*MIa, *MIb, AA))
-    return false;
-
-  // To this point analysis is generic. From here on we do need AA.
-  if (!AA)
-    return true;
-
-  // FIXME: Need to handle multiple memory operands to support all targets.
-  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
-    return true;
-
-  MachineMemOperand *MMOa = *MIa->memoperands_begin();
-  MachineMemOperand *MMOb = *MIb->memoperands_begin();
-
-  if (!MMOa->getValue() || !MMOb->getValue())
-    return true;
-
-  // The following interface to AA is fashioned after DAGCombiner::isAlias
-  // and operates with MachineMemOperand offset with some important
-  // assumptions:
-  //   - LLVM fundamentally assumes flat address spaces.
-  //   - MachineOperand offset can *only* result from legalization and
-  //     cannot affect queries other than the trivial case of overlap
-  //     checking.
-  //   - These offsets never wrap and never step outside
-  //     of allocated objects.
-  //   - There should never be any negative offsets here.
-  //
-  // FIXME: Modify API to hide this math from "user"
-  // FIXME: Even before we go to AA we can reason locally about some
-  // memory objects. It can save compile time, and possibly catch some
-  // corner cases not currently covered.
-
-  assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
-  assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");
-
-  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
-  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
-  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
-
-  AliasResult AAResult =
-      AA->alias(MemoryLocation(MMOa->getValue(), Overlapa,
-                               UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
-                MemoryLocation(MMOb->getValue(), Overlapb,
-                               UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
-
-  return (AAResult != NoAlias);
-}
-
 void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb,
                                             unsigned Latency) {
-  if (MIsNeedChainEdge(AAForDep, SUa->getInstr(), SUb->getInstr())) {
+  if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) {
     SDep Dep(SUa, SDep::MayAliasMem);
     Dep.setLatency(Latency);
     SUb->addPred(Dep);
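A note on the arithmetic in MachineInstr::mayAlias above: the MinOffset/Overlapa/Overlapb values conservatively extend each access from the smaller of the two MachineMemOperand offsets, so a plain overlap answer from alias analysis is sufficient. The standalone sketch below only illustrates that math; the MemOp struct and the sample offsets and sizes are hypothetical stand-ins, not LLVM types.

// Standalone sketch of the overlap math used above; MemOp and the sample
// values are hypothetical stand-ins for the two MachineMemOperands.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

struct MemOp {
  int64_t Offset; // Legalization-produced offset; assumed non-negative.
  int64_t Size;   // Access size in bytes.
};

// Computes the conservative sizes to query alias analysis with: each access
// is extended back to the smaller of the two offsets, so a simple "do the
// extended ranges overlap" answer from AA covers both original accesses.
static void overlapSizes(const MemOp &A, const MemOp &B,
                         int64_t &OverlapA, int64_t &OverlapB) {
  assert(A.Offset >= 0 && B.Offset >= 0 && "Negative MachineMemOperand offset");
  int64_t MinOffset = std::min(A.Offset, B.Offset);
  OverlapA = A.Size + A.Offset - MinOffset;
  OverlapB = B.Size + B.Offset - MinOffset;
}

int main() {
  // Two 4-byte accesses at offsets 8 and 16 from the same base value.
  MemOp A{8, 4}, B{16, 4};
  int64_t OA, OB;
  overlapSizes(A, B, OA, OB);
  // mayAlias would then ask AA about MemoryLocation(base, OA) vs
  // MemoryLocation(base, OB); here OA = 4 and OB = 12.
  std::printf("Overlapa = %lld, Overlapb = %lld\n", (long long)OA, (long long)OB);
  return 0;
}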
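For callers outside ScheduleDAGInstrs, the intended usage of the new API mirrors the rewritten addChainDependency above. The fragment below is a hypothetical client sketch, not part of this patch: needsChainEdge is an invented name, AA may be null, and UseTBAA is simply forwarded as true.

// Hypothetical client of the new API (not part of this patch).
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Returns true if a chain edge is needed between MIa and MIb, mirroring the
// old MIsNeedChainEdge() behaviour.
static bool needsChainEdge(AliasAnalysis *AA, MachineInstr &MIa,
                           MachineInstr &MIb) {
  // mayAlias() already returns false when neither instruction may store, so
  // the old "Dependency checked between two loads" assert has no equivalent
  // here.
  return MIa.mayAlias(AA, MIb, /*UseTBAA=*/true);
}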