Index: include/llvm/Target/TargetInstrInfo.h
===================================================================
--- include/llvm/Target/TargetInstrInfo.h
+++ include/llvm/Target/TargetInstrInfo.h
@@ -827,10 +827,11 @@
     return false;
   }
 
-  /// Get the base register and byte offset of a load/store instr.
-  virtual bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                                    unsigned &BaseReg, unsigned &Offset,
-                                    const TargetRegisterInfo *TRI) const {
+  /// Get the base register and byte offset of an instruction that reads/writes
+  /// memory.
+  virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+                                     unsigned &Offset,
+                                     const TargetRegisterInfo *TRI) const {
     return false;
   }
 
Index: lib/CodeGen/MachineScheduler.cpp
===================================================================
--- lib/CodeGen/MachineScheduler.cpp
+++ lib/CodeGen/MachineScheduler.cpp
@@ -1271,7 +1271,7 @@
     SUnit *SU = Loads[Idx];
     unsigned BaseReg;
     unsigned Offset;
-    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+    if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
       LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
   }
   if (LoadRecords.size() < 2)
Index: lib/Target/AArch64/AArch64InstrInfo.h
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.h
+++ lib/Target/AArch64/AArch64InstrInfo.h
@@ -90,13 +90,13 @@
   /// Hint that pairing the given load or store is unprofitable.
   void suppressLdStPair(MachineInstr *MI) const;
 
-  bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                            unsigned &Offset,
-                            const TargetRegisterInfo *TRI) const override;
+  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                             unsigned &Offset,
+                             const TargetRegisterInfo *TRI) const override;
 
-  bool getLdStBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
-                                 int &Offset, int &Width,
-                                 const TargetRegisterInfo *TRI) const;
+  bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
+                                  int &Offset, int &Width,
+                                  const TargetRegisterInfo *TRI) const;
 
   bool enableClusterLoads() const override { return true; }
 
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -629,8 +629,8 @@
   // base registers are identical, and the offset of a lower memory access +
   // the width doesn't overlap the offset of a higher memory access,
   // then the memory accesses are different.
-  if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
-      getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
+  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
+      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
     if (BaseRegA == BaseRegB) {
       int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
       int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
@@ -1310,9 +1310,9 @@
 }
 
 bool
-AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                                       unsigned &Offset,
-                                       const TargetRegisterInfo *TRI) const {
+AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                                        unsigned &Offset,
+                                        const TargetRegisterInfo *TRI) const {
   switch (LdSt->getOpcode()) {
   default:
     return false;
@@ -1336,7 +1336,7 @@
   };
 }
 
-bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
+bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
     MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
     const TargetRegisterInfo *TRI) const {
   // Handle only loads/stores with base register followed by immediate offset.
@@ -1434,7 +1434,7 @@
 
 /// Detect opportunities for ldp/stp formation.
 ///
-/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
+/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
 bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                           MachineInstr *SecondLdSt,
                                           unsigned NumLoads) const {
@@ -1443,7 +1443,7 @@
     return false;
   if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
     return false;
-  // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
+  // getMemOpBaseRegImmOfs guarantees that oper 2 isImm.
   unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
   // Allow 6 bits of positive range.
   if (Ofs1 > 64)
Index: lib/Target/AArch64/AArch64StorePairSuppress.cpp
===================================================================
--- lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -142,7 +142,7 @@
         continue;
       unsigned BaseReg;
       unsigned Offset;
-      if (TII->getLdStBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+      if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
        if (PrevBaseReg == BaseReg) {
          // If this block can take STPs, skip ahead to the next block.
          if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
Index: lib/Target/R600/SIInstrInfo.h
===================================================================
--- lib/Target/R600/SIInstrInfo.h
+++ lib/Target/R600/SIInstrInfo.h
@@ -79,9 +79,9 @@
                                int64_t &Offset1,
                                int64_t &Offset2) const override;
 
-  bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                            unsigned &BaseReg, unsigned &Offset,
-                            const TargetRegisterInfo *TRI) const final;
+  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                             unsigned &Offset,
+                             const TargetRegisterInfo *TRI) const final;
 
   bool shouldClusterLoads(MachineInstr *FirstLdSt,
                           MachineInstr *SecondLdSt,
Index: lib/Target/R600/SIInstrInfo.cpp
===================================================================
--- lib/Target/R600/SIInstrInfo.cpp
+++ lib/Target/R600/SIInstrInfo.cpp
@@ -200,9 +200,9 @@
   }
 }
 
-bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                                       unsigned &BaseReg, unsigned &Offset,
-                                       const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                                        unsigned &Offset,
+                                        const TargetRegisterInfo *TRI) const {
   unsigned Opc = LdSt->getOpcode();
   if (isDS(Opc)) {
     const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
@@ -1053,8 +1053,8 @@
   unsigned BaseReg0, Offset0;
   unsigned BaseReg1, Offset1;
 
-  if (getLdStBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
-      getLdStBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
+  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
+      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
     assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
            "read2 / write2 not expected here yet");
     unsigned Width0 = (*MIa->memoperands_begin())->getSize();
Index: lib/Target/X86/X86InstrInfo.h
===================================================================
--- lib/Target/X86/X86InstrInfo.h
+++ lib/Target/X86/X86InstrInfo.h
@@ -254,6 +254,10 @@
                      MachineBasicBlock *&FBB,
                      SmallVectorImpl<MachineOperand> &Cond,
                      bool AllowModify) const override;
+
+  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                             unsigned &Offset,
+                             const TargetRegisterInfo *TRI) const override;
   unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
   unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                         MachineBasicBlock *FBB,
Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -3967,6 +3967,36 @@
   }
 }
 
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+                                         unsigned &Offset,
+                                         const TargetRegisterInfo *TRI) const {
+  const MCInstrDesc &Desc = MemOp->getDesc();
+  int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+  if (MemRefBegin < 0)
+    return false;
+
+  MemRefBegin += X86II::getOperandBias(Desc);
+
+  BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
+  if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+    return false;
+
+  if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+      X86::NoRegister)
+    return false;
+
+  const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+
+  // Displacement can be symbolic
+  if (!DispMO.isImm())
+    return false;
+
+  Offset = DispMO.getImm();
+
+  return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
+          X86::NoRegister);
+}
+
 static unsigned getStoreRegOpcode(unsigned SrcReg,
                                   const TargetRegisterClass *RC,
                                   bool isStackAligned,
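Usage note (not part of the patch): the renamed hook keeps the same contract as
getLdStBaseRegImmOfs, so a target-independent pass consumes it exactly the way
the MachineScheduler and AArch64StorePairSuppress hunks above do. Below is a
minimal sketch of that pattern; the helper collectMemOps and the MemOpRecord
struct are illustrative names only, while the TargetInstrInfo and
TargetSubtargetInfo calls match the interface as of this change.

  // Illustrative sketch, not part of this patch: gather every instruction in a
  // block that the target can describe as "base register + immediate offset"
  // via the renamed getMemOpBaseRegImmOfs hook.
  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/Target/TargetInstrInfo.h"
  #include "llvm/Target/TargetSubtargetInfo.h"
  #include <vector>

  using namespace llvm;

  namespace {
  // Hypothetical record type; only the TII hook call below comes from the patch.
  struct MemOpRecord {
    MachineInstr *MI;
    unsigned BaseReg;
    unsigned Offset;
  };

  std::vector<MemOpRecord> collectMemOps(MachineBasicBlock &MBB) {
    const TargetSubtargetInfo &ST = MBB.getParent()->getSubtarget();
    const TargetInstrInfo *TII = ST.getInstrInfo();
    const TargetRegisterInfo *TRI = ST.getRegisterInfo();

    std::vector<MemOpRecord> Records;
    for (MachineInstr &MI : MBB) {
      unsigned BaseReg, Offset;
      // The hook returns false for accesses it cannot express as a plain base
      // register + immediate offset (scaled, indexed, or symbolic forms) and
      // for instructions that do not access memory at all.
      if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI))
        Records.push_back({&MI, BaseReg, Offset});
    }
    return Records;
  }
  } // end anonymous namespace

Records that share a BaseReg and have adjacent offsets are the kind of
candidates the AArch64 shouldClusterLoads/ldp-stp logic and the new X86
override are meant to expose.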