Index: llvm/lib/Target/RISCV/RISCVInstrInfo.h =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -21,10 +21,12 @@ namespace llvm { +class RISCVSubtarget; + class RISCVInstrInfo : public RISCVGenInstrInfo { public: - RISCVInstrInfo(); + explicit RISCVInstrInfo(RISCVSubtarget &STI); unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override; @@ -80,6 +82,17 @@ int64_t BrOffset) const override; bool isAsCheapAsAMove(const MachineInstr &MI) const override; + + bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, + const MachineOperand *&BaseOp, + int64_t &Offset, unsigned &Width, + const TargetRegisterInfo *TRI) const; + + bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, + const MachineInstr &MIb) const override; + +protected: + const RISCVSubtarget &STI; }; } #endif Index: llvm/lib/Target/RISCV/RISCVInstrInfo.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -29,8 +29,9 @@ using namespace llvm; -RISCVInstrInfo::RISCVInstrInfo() - : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP) {} +RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI) + : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP), + STI(STI) {} unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const { @@ -486,3 +487,58 @@ } return MI.isAsCheapAsAMove(); } + +// Return true if the base operand and byte offset of the instruction could be +// determined. Width is set to the size of memory being loaded/stored. 
+bool RISCVInstrInfo::getMemOperandWithOffsetWidth( + const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset, + unsigned &Width, const TargetRegisterInfo *TRI) const { + assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); + + // Here we assume the standard RISC-V ISA, which uses a base+offset + // addressing mode. You'll need to relax these conditions to support custom + // load/stores instructions. + if (LdSt.getNumExplicitOperands() != 3) + return false; + if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm()) + return false; + + if (!LdSt.hasOneMemOperand()) + return false; + + Width = (*LdSt.memoperands_begin())->getSize(); + BaseReg = &LdSt.getOperand(1); + Offset = LdSt.getOperand(2).getImm(); + return true; +} + +bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint( + const MachineInstr &MIa, const MachineInstr &MIb) const { + assert(MIa.mayLoadOrStore() && "MIa must be a load or store."); + assert(MIb.mayLoadOrStore() && "MIb must be a load or store."); + + if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() || + MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) + return false; + + // Retrieve the base register, offset from the base register and width. Width + // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If + // base registers are identical, and the offset of a lower memory access + + // the width doesn't overlap the offset of a higher memory access, + // then the memory accesses are different. 
+ const TargetRegisterInfo *TRI = STI.getRegisterInfo(); + const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr; + int64_t OffsetA = 0, OffsetB = 0; + unsigned int WidthA = 0, WidthB = 0; + if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) && + getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) { + if (BaseOpA->isIdenticalTo(*BaseOpB)) { + int LowOffset = std::min(OffsetA, OffsetB); + int HighOffset = std::max(OffsetA, OffsetB); + int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; + if (LowOffset + LowWidth <= HighOffset) + return true; + } + } + return false; +} Index: llvm/lib/Target/RISCV/RISCVSubtarget.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVSubtarget.cpp +++ llvm/lib/Target/RISCV/RISCVSubtarget.cpp @@ -51,7 +51,7 @@ StringRef ABIName, const TargetMachine &TM) : RISCVGenSubtargetInfo(TT, CPU, FS), FrameLowering(initializeSubtargetDependencies(TT, CPU, FS, ABIName)), - InstrInfo(), RegInfo(getHwMode()), TLInfo(TM, *this) { + InstrInfo(*this), RegInfo(getHwMode()), TLInfo(TM, *this) { CallLoweringInfo.reset(new RISCVCallLowering(*getTargetLowering())); Legalizer.reset(new RISCVLegalizerInfo(*this)); Index: llvm/test/CodeGen/RISCV/disjoint.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/disjoint.ll @@ -0,0 +1,21 @@ +; REQUIRES: asserts +; RUN: llc -mtriple=riscv32 -debug-only=machine-scheduler < %s \ +; RUN: -o /dev/null 2>&1 | FileCheck %s +; RUN: llc -mtriple=riscv64 -debug-only=machine-scheduler < %s \ +; RUN: -o /dev/null 2>&1 | FileCheck %s + +define i32 @test_disjoint(i32* %P, i32 %v) { +entry: +; CHECK: ********** MI Scheduling ********** +; CHECK-LABEL: test_disjoint:%bb.0 +; CHECK:SU(2): SW %1:gpr, %0:gpr, 12 :: (store 4 into %ir.arrayidx) +; CHECK-NOT: Successors: +; CHECK:SU(3): SW %1:gpr, %0:gpr, 8 :: (store 4 into %ir.arrayidx1) +; CHECK: Predecessors: +; CHECK-NOT: SU(2): Ord 
Latency=0 Memory + %arrayidx = getelementptr inbounds i32, i32* %P, i32 3 + store i32 %v, i32* %arrayidx + %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 2 + store i32 %v, i32* %arrayidx1 + ret i32 %v +}