Index: lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -63,7 +63,7 @@
 // instructions to run at the double precision rate for the device so it's
 // probably best to just report no single precision denormals.
 static uint32_t getFPMode(const MachineFunction &F) {
-  const AMDGPUSubtarget& ST = F.getSubtarget<AMDGPUSubtarget>();
+  const SISubtarget& ST = F.getSubtarget<SISubtarget>();
   // TODO: Is there any real use for the flush in only / flush out only modes?
 
   uint32_t FP32Denormals =
@@ -243,9 +243,8 @@
 void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
   unsigned MaxGPR = 0;
   bool killPixel = false;
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
-  const R600RegisterInfo *RI =
-      static_cast<const R600RegisterInfo *>(STM.getRegisterInfo());
+  const R600Subtarget &STM = MF.getSubtarget<R600Subtarget>();
+  const R600RegisterInfo *RI = STM.getRegisterInfo();
   const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
 
   for (const MachineBasicBlock &MBB : MF) {
@@ -268,7 +267,7 @@
   }
 
   unsigned RsrcReg;
-  if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
+  if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
     // Evergreen / Northern Islands
     switch (MF.getFunction()->getCallingConv()) {
     default: // Fall through
@@ -302,17 +301,15 @@
 
 void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                         const MachineFunction &MF) const {
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
   uint64_t CodeSize = 0;
   unsigned MaxSGPR = 0;
   unsigned MaxVGPR = 0;
   bool VCCUsed = false;
   bool FlatUsed = false;
-  const SIRegisterInfo *RI =
-      static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(STM.getInstrInfo());
+  const SIRegisterInfo *RI = STM.getRegisterInfo();
+  const SIInstrInfo *TII = STM.getInstrInfo();
 
   for (const MachineBasicBlock &MBB : MF) {
     for (const MachineInstr &MI : MBB) {
@@ -425,7 +422,7 @@
   if (VCCUsed)
     ExtraSGPRs = 2;
 
-  if (STM.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+  if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) {
     if (FlatUsed)
       ExtraSGPRs = 4;
   } else {
@@ -453,7 +450,7 @@
   ProgInfo.NumSGPR = MaxSGPR + 1;
 
   if (STM.hasSGPRInitBug()) {
-    if (ProgInfo.NumSGPR > AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
+    if (ProgInfo.NumSGPR > SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
       LLVMContext &Ctx = MF.getFunction()->getContext();
       DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
                                        "SGPRs with SGPR init bug",
@@ -461,7 +458,7 @@
       Ctx.diagnose(Diag);
     }
 
-    ProgInfo.NumSGPR = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
+    ProgInfo.NumSGPR = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
   }
 
   if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) {
@@ -497,7 +494,7 @@
   ProgInfo.CodeLen = CodeSize;
 
   unsigned LDSAlignShift;
-  if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
+  if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) {
     // LDS is allocated in 64 dword blocks.
     LDSAlignShift = 8;
   } else {
@@ -564,7 +561,7 @@
 
 void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                          const SIProgramInfo &KernelInfo) {
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
   unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());
 
@@ -618,7 +615,7 @@
 void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
                                           const SIProgramInfo &KernelInfo) const {
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
   amd_kernel_code_t header;
 
   AMDGPU::initDefaultAMDKernelCodeT(header, STM.getFeatureBits());
Index: lib/Target/AMDGPU/AMDGPUFrameLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUFrameLowering.h
+++ lib/Target/AMDGPU/AMDGPUFrameLowering.h
@@ -32,13 +32,18 @@
   /// \returns The number of 32-bit sub-registers that are used when storing
   /// values to the stack.
   unsigned getStackWidth(const MachineFunction &MF) const;
+
   int getFrameIndexReference(const MachineFunction &MF, int FI,
                              unsigned &FrameReg) const override;
-  const SpillSlot *
-  getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
-  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
-  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
-  bool hasFP(const MachineFunction &MF) const override;
+
+  const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const override {
+    NumEntries = 0;
+    return nullptr;
+  }
+
+  bool hasFP(const MachineFunction &MF) const override {
+    return false;
+  }
 };
 } // namespace llvm
 #endif
Index: lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
@@ -12,7 +12,8 @@
 //===----------------------------------------------------------------------===//
 #include "AMDGPUFrameLowering.h"
 #include "AMDGPURegisterInfo.h"
-#include "R600MachineFunctionInfo.h"
+#include "AMDGPUSubtarget.h"
+
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/IR/Instructions.h"
@@ -75,7 +76,8 @@
                                                int FI,
                                                unsigned &FrameReg) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
-  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+  const AMDGPURegisterInfo *RI
+    = MF.getSubtarget<AMDGPUSubtarget>().getRegisterInfo();
 
   // Fill in FrameReg output argument.
   FrameReg = RI->getFrameRegister(MF);
@@ -100,19 +102,3 @@
 
   return OffsetBytes / (getStackWidth(MF) * 4);
 }
-
-const TargetFrameLowering::SpillSlot *
-AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
-  NumEntries = 0;
-  return nullptr;
-}
-void AMDGPUFrameLowering::emitPrologue(MachineFunction &MF,
-                                       MachineBasicBlock &MBB) const {}
-void
-AMDGPUFrameLowering::emitEpilogue(MachineFunction &MF,
-                                  MachineBasicBlock &MBB) const {
-}
-
-bool
-AMDGPUFrameLowering::hasFP(const MachineFunction &MF) const {
-  return false;
-}
Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -177,7 +177,7 @@
     : SelectionDAGISel(TM) {}
 
 bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const AMDGPUSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<AMDGPUSubtarget>();
   return SelectionDAGISel::runOnMachineFunction(MF);
 }
Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -116,7 +116,7 @@
                               const SmallVectorImpl<ISD::OutputArg> &Outs) const;
 
 public:
-  AMDGPUTargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
+  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);
 
   bool isFAbsFree(EVT VT) const override;
   bool isFNegFree(EVT VT) const override;
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -64,7 +64,7 @@
   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
 }
 
-AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
+AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                            const AMDGPUSubtarget &STI)
     : TargetLowering(TM), Subtarget(&STI) {
   // Lower floating point store/load to integer store/load to reduce the number
Index: lib/Target/AMDGPU/AMDGPUInstrInfo.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -16,7 +16,6 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUINSTRINFO_H
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUINSTRINFO_H
 
-#include "AMDGPURegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 
 #define GET_INSTRINFO_HEADER
@@ -38,16 +37,13 @@
 
 class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
 private:
-  const AMDGPURegisterInfo RI;
-  virtual void anchor();
-protected:
   const AMDGPUSubtarget &ST;
-public:
-  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);
-  virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
+
+  virtual void anchor();
 
 public:
+  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);
+
   /// \returns the smallest register index that will be accessed by an indirect
   /// read or write or -1 if indirect addressing is not used by this program.
   int getIndirectIndexBegin(const MachineFunction &MF) const;
@@ -80,7 +76,6 @@
   /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
   /// equivalent opcode that writes \p Channels Channels.
   int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
-
 };
 
 namespace AMDGPU {
Index: lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
+++ lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -30,12 +30,8 @@
 // Pin the vtable to this file.
 void AMDGPUInstrInfo::anchor() {}
 
-AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
-    : AMDGPUGenInstrInfo(-1, -1), ST(st) {}
-
-const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
-  return RI;
-}
+AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &ST)
+  : AMDGPUGenInstrInfo(-1, -1), ST(ST) {}
 
 bool AMDGPUInstrInfo::enableClusterLoads() const {
   return true;
@@ -111,9 +107,11 @@
     return -1;
   }
 
+  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+  const AMDGPUFrameLowering *TFL = ST.getFrameLowering();
+
   unsigned IgnoredFrameReg;
-  Offset = MF.getSubtarget().getFrameLowering()->getFrameIndexReference(
-      MF, -1, IgnoredFrameReg);
+  Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);
 
   return getIndirectIndexBegin(MF) + Offset;
 }
@@ -138,18 +136,18 @@
   }
 }
 
-// This must be kept in sync with the SISubtarget class in SIInstrInfo.td
-enum SISubtarget {
+// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
+enum SIEncodingFamily {
   SI = 0,
   VI = 1
 };
 
-static enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
+static SIEncodingFamily AMDGPUSubtargetToSISubtarget(unsigned Gen) {
   switch (Gen) {
   default:
-    return SI;
+    return SIEncodingFamily::SI;
   case AMDGPUSubtarget::VOLCANIC_ISLANDS:
-    return VI;
+    return SIEncodingFamily::VI;
   }
 }
Index: lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
+++ lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
@@ -15,6 +15,7 @@
 
 #include "AMDGPUMCInstLower.h"
 #include "AMDGPUAsmPrinter.h"
+#include "AMDGPUSubtarget.h"
 #include "AMDGPUTargetMachine.h"
 #include "InstPrinter/AMDGPUInstPrinter.h"
 #include "SIInstrInfo.h"
@@ -36,8 +37,7 @@
 using namespace llvm;
 
 AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &st):
-  Ctx(ctx), ST(st)
-{ }
+  Ctx(ctx), ST(st) { }
 
 void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
 
@@ -140,10 +140,9 @@
       raw_string_ostream DisasmStream(DisasmLine);
 
       AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(),
-                                    *MF->getSubtarget().getInstrInfo(),
-                                    *MF->getSubtarget().getRegisterInfo());
-      InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(),
-                            MF->getSubtarget());
+                                    *STI.getInstrInfo(),
+                                    *STI.getRegisterInfo());
+      InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(), STI);
 
       // Disassemble instruction/operands to hex representation.
       SmallVector<MCFixup, 4> Fixups;
Index: lib/Target/AMDGPU/AMDGPURegisterInfo.h
===================================================================
--- lib/Target/AMDGPU/AMDGPURegisterInfo.h
+++ lib/Target/AMDGPU/AMDGPURegisterInfo.h
@@ -29,18 +29,8 @@
 class TargetInstrInfo;
 
 struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
-  static const MCPhysReg CalleeSavedReg;
-
   AMDGPURegisterInfo();
 
-  BitVector getReservedRegs(const MachineFunction &MF) const override {
-    assert(!"Unimplemented"); return BitVector();
-  }
-
-  virtual unsigned getHWRegIndex(unsigned Reg) const {
-    assert(!"Unimplemented"); return 0;
-  }
-
   /// \returns the sub reg enum value for the given \p Channel
   /// (e.g. getSubRegFromChannel(0) -> AMDGPU::sub0)
   unsigned getSubRegFromChannel(unsigned Channel) const;
@@ -52,7 +42,6 @@
   unsigned getFrameRegister(const MachineFunction &MF) const override;
 
   unsigned getIndirectSubReg(unsigned IndirectIndex) const;
-
 };
 
 } // End namespace llvm
Index: lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
+++ lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
@@ -24,10 +24,11 @@
 // they are not supported at this time.
 //===----------------------------------------------------------------------===//
 
-const MCPhysReg AMDGPURegisterInfo::CalleeSavedReg = AMDGPU::NoRegister;
+// Dummy to not crash RegisterClassInfo.
+static const MCPhysReg CalleeSavedReg = AMDGPU::NoRegister;
 
-const MCPhysReg*
-AMDGPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+const MCPhysReg *AMDGPURegisterInfo::getCalleeSavedRegs(
+  const MachineFunction *) const {
   return &CalleeSavedReg;
 }
 
@@ -55,7 +56,6 @@
 }
 
 unsigned AMDGPURegisterInfo::getIndirectSubReg(unsigned IndirectIndex) const {
-
   return getSubRegFromChannel(IndirectIndex);
 }
 
Index: lib/Target/AMDGPU/AMDGPUSubtarget.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -16,10 +16,12 @@
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUSUBTARGET_H
 
 #include "AMDGPU.h"
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUISelLowering.h"
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUSubtarget.h"
+#include "R600InstrInfo.h"
+#include "R600ISelLowering.h"
+#include "R600FrameLowering.h"
+#include "SIInstrInfo.h"
+#include "SIISelLowering.h"
+#include "SIFrameLowering.h"
 #include "Utils/AMDGPUBaseInfo.h"
 #include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
@@ -29,11 +31,10 @@
 
 namespace llvm {
 
-class StringRef;
 class SIMachineFunctionInfo;
+class StringRef;
 
 class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
-
 public:
   enum Generation {
     R600 = 0,
@@ -46,10 +47,6 @@
   };
 
   enum {
-    FIXED_SGPR_COUNT_FOR_INIT_BUG = 80
-  };
-
-  enum {
     ISAVersion0_0_0,
     ISAVersion7_0_0,
     ISAVersion7_0_1,
@@ -58,113 +55,104 @@
     ISAVersion8_0_3
   };
 
-private:
-  bool DumpCode;
-  bool R600ALUInst;
-  bool HasVertexCache;
-  short TexVTXClauseSize;
+protected:
+  // Basic subtarget description.
+  Triple TargetTriple;
   Generation Gen;
-  bool FP64;
-  bool FP64Denormals;
-  bool FP32Denormals;
-  bool FPExceptions;
+  unsigned IsaVersion;
+  unsigned WavefrontSize;
+  int LocalMemorySize;
+  int LDSBankCount;
+  unsigned MaxPrivateElementSize;
+
+  // Possibly statically set by tablegen, but may want to be overridden.
   bool FastFMAF32;
   bool HalfRate64Ops;
-  bool CaymanISA;
-  bool FlatAddressSpace;
+
+  // Dynamically set bits that enable features.
+  bool FP32Denormals;
+  bool FP64Denormals;
+  bool FPExceptions;
   bool FlatForGlobal;
+  bool EnableXNACK;
+  bool DebuggerInsertNops;
+  bool DebuggerReserveRegs;
+
+  // Used as options.
+  bool EnableVGPRSpilling;
   bool EnableIRStructurizer;
   bool EnablePromoteAlloca;
   bool EnableIfCvt;
   bool EnableLoadStoreOpt;
   bool EnableUnsafeDSOffsetFolding;
-  bool EnableXNACK;
-  unsigned WavefrontSize;
-  bool CFALUBug;
-  int LocalMemorySize;
-  unsigned MaxPrivateElementSize;
-  bool EnableVGPRSpilling;
-  bool SGPRInitBug;
+  bool EnableSIScheduler;
+  bool DumpCode;
+
+  // Subtarget properties statically set by tablegen.
+  bool FP64;
   bool IsGCN;
   bool GCN1Encoding;
   bool GCN3Encoding;
   bool CIInsts;
+  bool SGPRInitBug;
   bool HasSMemRealTime;
   bool Has16BitInsts;
+  bool FlatAddressSpace;
+  bool R600ALUInst;
+  bool CaymanISA;
+  bool CFALUBug;
+  bool HasVertexCache;
+  short TexVTXClauseSize;
+
+  // Dummy feature to use for assembler in tablegen.
   bool FeatureDisable;
-  int LDSBankCount;
-  unsigned IsaVersion;
-  bool EnableSIScheduler;
-  bool DebuggerInsertNops;
-  bool DebuggerReserveRegs;
 
-  std::unique_ptr<AMDGPUFrameLowering> FrameLowering;
-  std::unique_ptr<AMDGPUTargetLowering> TLInfo;
-  std::unique_ptr<AMDGPUInstrInfo> InstrInfo;
-  std::unique_ptr<GISelAccessor> GISel;
   InstrItineraryData InstrItins;
-  Triple TargetTriple;
 
 public:
-  AMDGPUSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
-                  TargetMachine &TM);
+  AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                  const TargetMachine &TM);
+  virtual ~AMDGPUSubtarget();
+
   AMDGPUSubtarget &initializeSubtargetDependencies(const Triple &TT,
                                                    StringRef GPU, StringRef FS);
 
-  void setGISelAccessor(GISelAccessor &GISel) {
-    this->GISel.reset(&GISel);
-  }
+  const AMDGPUInstrInfo *getInstrInfo() const override;
+  const AMDGPUFrameLowering *getFrameLowering() const override;
+  const AMDGPUTargetLowering *getTargetLowering() const override;
+  const AMDGPURegisterInfo *getRegisterInfo() const override;
 
-  const AMDGPUFrameLowering *getFrameLowering() const override {
-    return FrameLowering.get();
-  }
-  const AMDGPUInstrInfo *getInstrInfo() const override {
-    return InstrInfo.get();
-  }
-  const AMDGPURegisterInfo *getRegisterInfo() const override {
-    return &InstrInfo->getRegisterInfo();
-  }
-  AMDGPUTargetLowering *getTargetLowering() const override {
-    return TLInfo.get();
-  }
   const InstrItineraryData *getInstrItineraryData() const override {
     return &InstrItins;
  }
 
-  const CallLowering *getCallLowering() const override;
-
   void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
 
-  bool hasVertexCache() const {
-    return HasVertexCache;
-  }
-
-  short getTexVTXClauseSize() const {
-    return TexVTXClauseSize;
+  bool isAmdHsaOS() const {
+    return TargetTriple.getOS() == Triple::AMDHSA;
   }
 
   Generation getGeneration() const {
     return Gen;
   }
 
-  bool hasHWFP64() const {
-    return FP64;
+  unsigned getWavefrontSize() const {
+    return WavefrontSize;
   }
 
-  bool hasCaymanISA() const {
-    return CaymanISA;
+  int getLocalMemorySize() const {
+    return LocalMemorySize;
   }
 
-  bool hasFP32Denormals() const {
-    return FP32Denormals;
+  int getLDSBankCount() const {
+    return LDSBankCount;
   }
 
-  bool hasFP64Denormals() const {
-    return FP64Denormals;
+  unsigned getMaxPrivateElementSize() const {
    return MaxPrivateElementSize;
   }
 
-  bool hasFPExceptions() const {
-    return FPExceptions;
+  bool hasHWFP64() const {
+    return FP64;
   }
 
   bool hasFastFMAF32() const {
@@ -175,22 +163,6 @@
     return HalfRate64Ops;
   }
 
-  bool hasFlatAddressSpace() const {
-    return FlatAddressSpace;
-  }
-
-  bool hasSMemRealTime() const {
-    return HasSMemRealTime;
-  }
-
-  bool has16BitInsts() const {
-    return Has16BitInsts;
-  }
-
-  bool useFlatForGlobal() const {
-    return FlatForGlobal;
-  }
-
   bool hasAddr64() const {
     return (getGeneration() < VOLCANIC_ISLANDS);
   }
@@ -242,6 +214,10 @@
     return (getGeneration() >= EVERGREEN);
   }
 
+  bool hasCaymanISA() const {
+    return CaymanISA;
+  }
+
   bool IsIRStructurizerEnabled() const {
     return EnableIRStructurizer;
   }
@@ -254,23 +230,12 @@
     return EnableIfCvt;
   }
 
-  bool loadStoreOptEnabled() const {
-    return EnableLoadStoreOpt;
-  }
-
   bool unsafeDSOffsetFoldingEnabled() const {
     return EnableUnsafeDSOffsetFolding;
   }
 
-  unsigned getWavefrontSize() const {
-    return WavefrontSize;
-  }
-
-  unsigned getStackEntrySize() const;
-
-  bool hasCFAluBug() const {
-    assert(getGeneration() <= NORTHERN_ISLANDS);
-    return CFALUBug;
+  bool dumpCode() const {
+    return DumpCode;
   }
 
   /// Return the amount of LDS that can be used that will not restrict the
@@ -282,89 +247,212 @@
 
   unsigned getOccupancyWithLocalMemSize(uint32_t Bytes) const;
 
-  int getLocalMemorySize() const {
-    return LocalMemorySize;
+  bool hasFP32Denormals() const {
+    return FP32Denormals;
   }
 
-  unsigned getMaxPrivateElementSize() const {
-    return MaxPrivateElementSize;
+  bool hasFP64Denormals() const {
+    return FP64Denormals;
   }
 
-  bool hasSGPRInitBug() const {
-    return SGPRInitBug;
+  bool hasFPExceptions() const {
+    return FPExceptions;
   }
 
-  int getLDSBankCount() const {
-    return LDSBankCount;
+  bool useFlatForGlobal() const {
+    return FlatForGlobal;
   }
 
-  unsigned getAmdKernelCodeChipID() const;
+  bool isXNACKEnabled() const {
+    return EnableXNACK;
+  }
 
-  AMDGPU::IsaVersion getIsaVersion() const;
+  unsigned getMaxWavesPerCU() const {
+    if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
+      return 10;
+
+    // FIXME: Not sure what this is for other subtargets.
+    return 8;
+  }
+
+  /// \brief Returns the offset in bytes from the start of the input buffer
+  ///        of the first explicit kernel argument.
+  unsigned getExplicitKernelArgOffset() const {
+    return isAmdHsaOS() ? 0 : 36;
+  }
+
+  unsigned getStackAlignment() const {
+    // Scratch is allocated in 256 dword per wave blocks.
+    return 4 * 256 / getWavefrontSize();
+  }
 
   bool enableMachineScheduler() const override {
     return true;
   }
 
-  void overrideSchedPolicy(MachineSchedPolicy &Policy,
-                           MachineInstr *begin, MachineInstr *end,
-                           unsigned NumRegionInstrs) const override;
+  bool enableSubRegLiveness() const override {
+    return true;
+  }
+};
 
-  // Helper functions to simplify if statements
-  bool isTargetELF() const {
-    return false;
+class R600Subtarget final : public AMDGPUSubtarget {
+private:
+  R600InstrInfo InstrInfo;
+  R600FrameLowering FrameLowering;
+  R600TargetLowering TLInfo;
+
+public:
+  R600Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
+                const TargetMachine &TM);
+
+  const R600InstrInfo *getInstrInfo() const override {
+    return &InstrInfo;
   }
 
-  bool enableSIScheduler() const {
-    return EnableSIScheduler;
+  const R600FrameLowering *getFrameLowering() const override {
+    return &FrameLowering;
   }
 
-  bool debuggerInsertNops() const {
-    return DebuggerInsertNops;
+  const R600TargetLowering *getTargetLowering() const override {
+    return &TLInfo;
   }
 
-  bool debuggerReserveRegs() const {
-    return DebuggerReserveRegs;
+  const R600RegisterInfo *getRegisterInfo() const override {
+    return &InstrInfo.getRegisterInfo();
   }
 
-  bool dumpCode() const {
-    return DumpCode;
+  bool hasCFAluBug() const {
+    return CFALUBug;
   }
-  bool r600ALUEncoding() const {
-    return R600ALUInst;
+
+  bool hasVertexCache() const {
+    return HasVertexCache;
   }
-  bool isAmdHsaOS() const {
-    return TargetTriple.getOS() == Triple::AMDHSA;
+
+  short getTexVTXClauseSize() const {
+    return TexVTXClauseSize;
   }
-  bool isVGPRSpillingEnabled(const Function& F) const;
 
-  bool isXNACKEnabled() const {
-    return EnableXNACK;
+  unsigned getStackEntrySize() const;
+};
+
+class SISubtarget final : public AMDGPUSubtarget {
+public:
+  enum {
+    FIXED_SGPR_COUNT_FOR_INIT_BUG = 80
+  };
+
+private:
+  SIInstrInfo InstrInfo;
+  SIFrameLowering FrameLowering;
+  SITargetLowering TLInfo;
+  std::unique_ptr<GISelAccessor> GISel;
+
+public:
+  SISubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+              const TargetMachine &TM);
+
+  const SIInstrInfo *getInstrInfo() const override {
+    return &InstrInfo;
   }
 
-  unsigned getMaxWavesPerCU() const {
-    if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
-      return 10;
+  const SIFrameLowering *getFrameLowering() const override {
+    return &FrameLowering;
+  }
 
-    // FIXME: Not sure what this is for other subtagets.
-    return 8;
+  const SITargetLowering *getTargetLowering() const override {
+    return &TLInfo;
   }
 
-  bool enableSubRegLiveness() const override {
-    return true;
+  const CallLowering *getCallLowering() const override {
+    assert(GISel && "Access to GlobalISel APIs not set");
+    return GISel->getCallLowering();
   }
 
-  /// \brief Returns the offset in bytes from the start of the input buffer
-  /// of the first explicit kernel argument.
-  unsigned getExplicitKernelArgOffset() const {
-    return isAmdHsaOS() ? 0 : 36;
+  const SIRegisterInfo *getRegisterInfo() const override {
+    return &InstrInfo.getRegisterInfo();
   }
 
+  void setGISelAccessor(GISelAccessor &GISel) {
+    this->GISel.reset(&GISel);
+  }
+
+  void overrideSchedPolicy(MachineSchedPolicy &Policy,
+                           MachineInstr *Begin, MachineInstr *End,
+                           unsigned NumRegionInstrs) const override;
+
+  bool isVGPRSpillingEnabled(const Function& F) const;
+
+  unsigned getAmdKernelCodeChipID() const;
+
+  AMDGPU::IsaVersion getIsaVersion() const;
+
   unsigned getMaxNumUserSGPRs() const {
     return 16;
   }
+
+  bool hasFlatAddressSpace() const {
+    return FlatAddressSpace;
+  }
+
+  bool hasSMemRealTime() const {
+    return HasSMemRealTime;
+  }
+
+  bool has16BitInsts() const {
+    return Has16BitInsts;
+  }
+
+  bool enableSIScheduler() const {
+    return EnableSIScheduler;
+  }
+
+  bool debuggerInsertNops() const {
+    return DebuggerInsertNops;
+  }
+
+  bool debuggerReserveRegs() const {
+    return DebuggerReserveRegs;
+  }
+
+  bool loadStoreOptEnabled() const {
+    return EnableLoadStoreOpt;
+  }
+
+  bool hasSGPRInitBug() const {
+    return SGPRInitBug;
+  }
 };
+
+inline const AMDGPUInstrInfo *AMDGPUSubtarget::getInstrInfo() const {
+  if (getGeneration() >= SOUTHERN_ISLANDS)
+    return static_cast<const SISubtarget *>(this)->getInstrInfo();
+
+  return static_cast<const R600Subtarget *>(this)->getInstrInfo();
+}
+
+inline const AMDGPUFrameLowering *AMDGPUSubtarget::getFrameLowering() const {
+  if (getGeneration() >= SOUTHERN_ISLANDS)
+    return static_cast<const SISubtarget *>(this)->getFrameLowering();
+
+  return static_cast<const R600Subtarget *>(this)->getFrameLowering();
+}
+
+inline const AMDGPUTargetLowering *AMDGPUSubtarget::getTargetLowering() const {
+  if (getGeneration() >= SOUTHERN_ISLANDS)
+    return static_cast<const SISubtarget *>(this)->getTargetLowering();
+
+  return static_cast<const R600Subtarget *>(this)->getTargetLowering();
+}
+
+inline const AMDGPURegisterInfo *AMDGPUSubtarget::getRegisterInfo() const {
  if (getGeneration() >= SOUTHERN_ISLANDS)
    return static_cast<const SISubtarget *>(this)->getRegisterInfo();

  return static_cast<const R600Subtarget *>(this)->getRegisterInfo();
}
 
 } // End namespace llvm
 
 #endif
Index: lib/Target/AMDGPU/AMDGPUSubtarget.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -44,6 +44,8 @@
 } // End anonymous namespace.
 #endif
 
+AMDGPUSubtarget::~AMDGPUSubtarget() {}
+
 AMDGPUSubtarget &
 AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
                                                  StringRef GPU, StringRef FS) {
@@ -79,82 +81,56 @@
 }
 
 AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
-                                 TargetMachine &TM)
-    : AMDGPUGenSubtargetInfo(TT, GPU, FS),
-      DumpCode(false), R600ALUInst(false), HasVertexCache(false),
-      TexVTXClauseSize(0),
-      Gen(TT.getArch() == Triple::amdgcn ? SOUTHERN_ISLANDS : R600),
-      FP64(false),
-      FP64Denormals(false), FP32Denormals(false), FPExceptions(false),
-      FastFMAF32(false), HalfRate64Ops(false), CaymanISA(false),
-      FlatAddressSpace(false), FlatForGlobal(false), EnableIRStructurizer(true),
-      EnablePromoteAlloca(false),
-      EnableIfCvt(true), EnableLoadStoreOpt(false),
-      EnableUnsafeDSOffsetFolding(false),
-      EnableXNACK(false),
-      WavefrontSize(64), CFALUBug(false),
-      LocalMemorySize(0), MaxPrivateElementSize(0),
-      EnableVGPRSpilling(false), SGPRInitBug(false), IsGCN(false),
-      GCN1Encoding(false), GCN3Encoding(false), CIInsts(false),
-      HasSMemRealTime(false), Has16BitInsts(false),
-      LDSBankCount(0),
-      IsaVersion(ISAVersion0_0_0),
-      EnableSIScheduler(false),
-      DebuggerInsertNops(false), DebuggerReserveRegs(false),
-      FrameLowering(nullptr),
-      GISel(),
-      InstrItins(getInstrItineraryForCPU(GPU)), TargetTriple(TT) {
-
+                                 const TargetMachine &TM)
+  : AMDGPUGenSubtargetInfo(TT, GPU, FS),
+    TargetTriple(TT),
+    Gen(TT.getArch() == Triple::amdgcn ? SOUTHERN_ISLANDS : R600),
+    IsaVersion(ISAVersion0_0_0),
+    WavefrontSize(64),
+    LocalMemorySize(0),
+    LDSBankCount(0),
+    MaxPrivateElementSize(0),
+
+    FastFMAF32(false),
+    HalfRate64Ops(false),
+
+    FP32Denormals(false),
+    FP64Denormals(false),
+    FPExceptions(false),
+    FlatForGlobal(false),
+    EnableXNACK(false),
+    DebuggerInsertNops(false),
+    DebuggerReserveRegs(false),
+
+    EnableVGPRSpilling(false),
+    EnableIRStructurizer(true),
+    EnablePromoteAlloca(false),
+    EnableIfCvt(true),
+    EnableLoadStoreOpt(false),
+    EnableUnsafeDSOffsetFolding(false),
+    EnableSIScheduler(false),
+    DumpCode(false),
+
+    FP64(false),
+    IsGCN(false),
+    GCN1Encoding(false),
+    GCN3Encoding(false),
+    CIInsts(false),
+    SGPRInitBug(false),
+    HasSMemRealTime(false),
+    Has16BitInsts(false),
+    FlatAddressSpace(false),
+
+    R600ALUInst(false),
+    CaymanISA(false),
+    CFALUBug(false),
+    HasVertexCache(false),
+    TexVTXClauseSize(0),
+
+    FeatureDisable(false),
+
+    InstrItins(getInstrItineraryForCPU(GPU)) {
   initializeSubtargetDependencies(TT, GPU, FS);
-
-  // Scratch is allocated in 256 dword per wave blocks.
-  const unsigned StackAlign = 4 * 256 / getWavefrontSize();
-
-  if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-    InstrInfo.reset(new R600InstrInfo(*this));
-    TLInfo.reset(new R600TargetLowering(TM, *this));
-
-    // FIXME: Should have R600 specific FrameLowering
-    FrameLowering.reset(new AMDGPUFrameLowering(
-        TargetFrameLowering::StackGrowsUp,
-        StackAlign,
-        0));
-  } else {
-    InstrInfo.reset(new SIInstrInfo(*this));
-    TLInfo.reset(new SITargetLowering(TM, *this));
-    FrameLowering.reset(new SIFrameLowering(
-        TargetFrameLowering::StackGrowsUp,
-        StackAlign,
-        0));
-#ifndef LLVM_BUILD_GLOBAL_ISEL
-    GISelAccessor *GISel = new GISelAccessor();
-#else
-    AMDGPUGISelActualAccessor *GISel =
-        new AMDGPUGISelActualAccessor();
-    GISel->CallLoweringInfo.reset(
-        new AMDGPUCallLowering(*getTargetLowering()));
-#endif
-    setGISelAccessor(*GISel);
-  }
-}
-
-const CallLowering *AMDGPUSubtarget::getCallLowering() const {
-  assert(GISel && "Access to GlobalISel APIs not set");
-  return GISel->getCallLowering();
-}
-
-unsigned AMDGPUSubtarget::getStackEntrySize() const {
-  assert(getGeneration() <= NORTHERN_ISLANDS);
-  switch(getWavefrontSize()) {
-  case 16:
-    return 8;
-  case 32:
-    return hasCaymanISA() ? 4 : 8;
-  case 64:
-    return 4;
-  default:
-    llvm_unreachable("Illegal wavefront size.");
-  }
 }
 
 // FIXME: These limits are for SI. Did they change with the larger maximum LDS
@@ -215,40 +191,75 @@
   return 1;
 }
 
-unsigned AMDGPUSubtarget::getAmdKernelCodeChipID() const {
-  switch(getGeneration()) {
-  default: llvm_unreachable("ChipID unknown");
-  case SEA_ISLANDS: return 12;
-  }
-}
-
-AMDGPU::IsaVersion AMDGPUSubtarget::getIsaVersion() const {
-  return AMDGPU::getIsaVersion(getFeatureBits());
+R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
+                             const TargetMachine &TM) :
+  AMDGPUSubtarget(TT, GPU, FS, TM),
+  InstrInfo(*this),
+  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
+  TLInfo(TM, *this) {}
+
+SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS,
+                         const TargetMachine &TM) :
+  AMDGPUSubtarget(TT, GPU, FS, TM),
+  InstrInfo(*this),
+  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
+  TLInfo(TM, *this) {
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+  GISelAccessor *GISel = new GISelAccessor();
+#else
+  AMDGPUGISelActualAccessor *GISel =
+    new AMDGPUGISelActualAccessor();
+  GISel->CallLoweringInfo.reset(
+    new AMDGPUCallLowering(*getTargetLowering()));
+#endif
+  setGISelAccessor(*GISel);
 }
 
-bool AMDGPUSubtarget::isVGPRSpillingEnabled(const Function& F) const {
-  return !AMDGPU::isShader(F.getCallingConv()) || EnableVGPRSpilling;
+unsigned R600Subtarget::getStackEntrySize() const {
+  switch (getWavefrontSize()) {
+  case 16:
+    return 8;
+  case 32:
+    return hasCaymanISA() ? 4 : 8;
+  case 64:
+    return 4;
+  default:
+    llvm_unreachable("Illegal wavefront size.");
+  }
 }
 
-void AMDGPUSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+void SISubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                       MachineInstr *begin, MachineInstr *end,
                                       unsigned NumRegionInstrs) const {
-  if (getGeneration() >= SOUTHERN_ISLANDS) {
-
-    // Track register pressure so the scheduler can try to decrease
-    // pressure once register usage is above the threshold defined by
-    // SIRegisterInfo::getRegPressureSetLimit()
-    Policy.ShouldTrackPressure = true;
+  // Track register pressure so the scheduler can try to decrease
+  // pressure once register usage is above the threshold defined by
+  // SIRegisterInfo::getRegPressureSetLimit()
+  Policy.ShouldTrackPressure = true;
+
+  // Enabling both top down and bottom up scheduling seems to give us less
+  // register spills than just using one of these approaches on its own.
+  Policy.OnlyTopDown = false;
+  Policy.OnlyBottomUp = false;
+
+  // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
+  if (!enableSIScheduler())
+    Policy.ShouldTrackLaneMasks = true;
+}
 
-    // Enabling both top down and bottom up scheduling seems to give us less
-    // register spills than just using one of these approaches on its own.
-    Policy.OnlyTopDown = false;
-    Policy.OnlyBottomUp = false;
+bool SISubtarget::isVGPRSpillingEnabled(const Function& F) const {
+  return EnableVGPRSpilling || !AMDGPU::isShader(F.getCallingConv());
+}
 
-    // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
-    if (!enableSIScheduler())
-      Policy.ShouldTrackLaneMasks = true;
+unsigned SISubtarget::getAmdKernelCodeChipID() const {
+  switch (getGeneration()) {
+  case SEA_ISLANDS:
+    return 12;
+  default:
+    llvm_unreachable("ChipID unknown");
   }
 }
 
+AMDGPU::IsaVersion SISubtarget::getIsaVersion() const {
+  return AMDGPU::getIsaVersion(getFeatureBits());
+}
Index: lib/Target/AMDGPU/AMDGPUTargetMachine.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -15,12 +15,8 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETMACHINE_H
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETMACHINE_H
 
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUInstrInfo.h"
 #include "AMDGPUIntrinsicInfo.h"
 #include "AMDGPUSubtarget.h"
-#include "R600ISelLowering.h"
-#include "llvm/IR/DataLayout.h"
 
 namespace llvm {
 
@@ -29,11 +25,8 @@
 //===----------------------------------------------------------------------===//
 
 class AMDGPUTargetMachine : public LLVMTargetMachine {
-private:
-
 protected:
   std::unique_ptr<TargetLoweringObjectFile> TLOF;
-  AMDGPUSubtarget Subtarget;
   AMDGPUIntrinsicInfo IntrinsicInfo;
 
 public:
@@ -43,10 +36,9 @@
                      CodeGenOpt::Level OL);
   ~AMDGPUTargetMachine();
 
-  const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; }
-  const AMDGPUSubtarget *getSubtargetImpl(const Function &) const override {
-    return &Subtarget;
-  }
+  const AMDGPUSubtarget *getSubtargetImpl() const;
+  const AMDGPUSubtarget *getSubtargetImpl(const Function &) const override;
+
   const AMDGPUIntrinsicInfo *getIntrinsicInfo() const override {
     return &IntrinsicInfo;
   }
@@ -62,6 +54,8 @@
 //===----------------------------------------------------------------------===//
 
 class R600TargetMachine final : public AMDGPUTargetMachine {
+private:
+  R600Subtarget Subtarget;
 
 public:
   R600TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
@@ -70,6 +64,14 @@
                     CodeGenOpt::Level OL);
 
   TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+  const R600Subtarget *getSubtargetImpl() const {
+    return &Subtarget;
+  }
+
+  const R600Subtarget *getSubtargetImpl(const Function &) const override {
+    return &Subtarget;
+  }
 };
 
 //===----------------------------------------------------------------------===//
@@ -77,6 +79,8 @@
 //===----------------------------------------------------------------------===//
 
 class GCNTargetMachine final : public AMDGPUTargetMachine {
+private:
+  SISubtarget Subtarget;
 
 public:
   GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
@@ -85,8 +89,29 @@
                    CodeGenOpt::Level OL);
 
   TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+  const SISubtarget *getSubtargetImpl() const {
+    return &Subtarget;
+  }
+
+  const SISubtarget *getSubtargetImpl(const Function &) const override {
+    return &Subtarget;
+  }
 };
 
+inline const AMDGPUSubtarget *AMDGPUTargetMachine::getSubtargetImpl() const {
+  if (getTargetTriple().getArch() == Triple::amdgcn)
+    return static_cast<const GCNTargetMachine *>(this)->getSubtargetImpl();
+  return static_cast<const R600TargetMachine *>(this)->getSubtargetImpl();
+}
+
+inline const AMDGPUSubtarget *AMDGPUTargetMachine::getSubtargetImpl(
+  const Function &F) const {
+  if (getTargetTriple().getArch() == Triple::amdgcn)
+    return static_cast<const GCNTargetMachine *>(this)->getSubtargetImpl(F);
+  return static_cast<const R600TargetMachine *>(this)->getSubtargetImpl(F);
+}
+
 } // End namespace llvm
 
 #endif
Index: lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -37,7 +37,6 @@
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Scalar/GVN.h"
-#include "llvm/CodeGen/Passes.h"
 
 using namespace llvm;
 
@@ -121,7 +120,7 @@
     : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                         FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
       TLOF(createTLOF(getTargetTriple())),
-      Subtarget(TT, getTargetCPU(), FS, *this), IntrinsicInfo() {
+      IntrinsicInfo() {
   setRequiresStructuredCFG(true);
   initAsmInfo();
 }
@@ -137,7 +136,8 @@
                                      TargetOptions Options,
                                      Optional<Reloc::Model> RM,
                                      CodeModel::Model CM, CodeGenOpt::Level OL)
-    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
+    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+      Subtarget(TT, getTargetCPU(), FS, *this) {}
 
 //===----------------------------------------------------------------------===//
 // GCN Target Machine (SI+)
@@ -148,7 +148,8 @@
                                    TargetOptions Options,
                                    Optional<Reloc::Model> RM,
                                    CodeModel::Model CM, CodeGenOpt::Level OL)
-    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
+    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+      Subtarget(TT, getTargetCPU(), FS, *this) {}
 
 //===----------------------------------------------------------------------===//
 // AMDGPU Pass Setup
@@ -171,16 +172,6 @@
     return getTM<AMDGPUTargetMachine>();
   }
 
-  ScheduleDAGInstrs *
-  createMachineScheduler(MachineSchedContext *C) const override {
-    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
-    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
-      return createR600MachineScheduler(C);
-    else if (ST.enableSIScheduler())
-      return createSIMachineScheduler(C);
-    return nullptr;
-  }
-
   void addEarlyCSEOrGVNPass();
   void addStraightLineScalarOptimizationPasses();
   void addIRPasses() override;
@@ -194,6 +185,11 @@
   R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
     : AMDGPUPassConfig(TM, PM) { }
 
+  ScheduleDAGInstrs *createMachineScheduler(
+    MachineSchedContext *C) const override {
+    return createR600MachineScheduler(C);
+  }
+
   bool addPreISel() override;
   void addPreRegAlloc() override;
   void addPreSched2() override;
@@ -204,6 +200,19 @@
 public:
   GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
     : AMDGPUPassConfig(TM, PM) { }
+
+  GCNTargetMachine &getGCNTargetMachine() const {
+    return getTM<GCNTargetMachine>();
+  }
+
+  ScheduleDAGInstrs *
+  createMachineScheduler(MachineSchedContext *C) const override {
+    const SISubtarget *ST = getGCNTargetMachine().getSubtargetImpl();
+    if (ST->enableSIScheduler())
+      return createSIMachineScheduler(C);
+    return nullptr;
+  }
+
   bool addPreISel() override;
   void addMachineSSAOptimization() override;
   bool addInstSelector() override;
@@ -401,7 +410,7 @@
 #endif
 
 void GCNPassConfig::addPreRegAlloc() {
-  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
+  const SISubtarget &ST = *getGCNTargetMachine().getSubtargetImpl();
 
   // This needs to be run directly before register allocation because
   // earlier passes might recompute live intervals.
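[Editor's note, not part of the patch] The AMDGPUSubtarget.h and AMDGPUTargetMachine.h hunks above share one design: the base class keeps non-virtual accessors whose inline definitions, placed after both leaf classes are complete, downcast `this` based on a discriminator (the generation for subtargets, the triple arch for target machines). Below is a compilable sketch of that pattern; the *Model names are hypothetical stand-ins for the real LLVM classes, not LLVM APIs.

struct AMDGPUInstrInfoModel {};
struct R600InstrInfoModel : AMDGPUInstrInfoModel {};
struct SIInstrInfoModel : AMDGPUInstrInfoModel {};

class SubtargetModel {
public:
  enum Generation { R600 = 0, SOUTHERN_ISLANDS = 4 };

  explicit SubtargetModel(Generation G) : Gen(G) {}
  Generation getGeneration() const { return Gen; }

  // Declared here, defined after the leaf classes below are complete,
  // just as AMDGPUSubtarget::getInstrInfo() is defined inline at the
  // bottom of AMDGPUSubtarget.h in the patch.
  inline const AMDGPUInstrInfoModel *getInstrInfo() const;

private:
  Generation Gen;
};

class R600SubtargetModel final : public SubtargetModel {
  R600InstrInfoModel InstrInfo;
public:
  R600SubtargetModel() : SubtargetModel(R600) {}
  const R600InstrInfoModel *getInstrInfo() const { return &InstrInfo; }
};

class SISubtargetModel final : public SubtargetModel {
  SIInstrInfoModel InstrInfo;
public:
  SISubtargetModel() : SubtargetModel(SOUTHERN_ISLANDS) {}
  const SIInstrInfoModel *getInstrInfo() const { return &InstrInfo; }
};

// The dispatch: no virtual call, just a downcast keyed on the generation.
inline const AMDGPUInstrInfoModel *SubtargetModel::getInstrInfo() const {
  if (getGeneration() >= SOUTHERN_ISLANDS)
    return static_cast<const SISubtargetModel *>(this)->getInstrInfo();
  return static_cast<const R600SubtargetModel *>(this)->getInstrInfo();
}

Because only one leaf class is ever instantiated for a given generation, the static_cast is always valid, and callers get the covariant R600/SI type without a virtual call or an explicit cast at every use site.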
Index: lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
===================================================================
--- lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
+++ lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -160,7 +160,7 @@
   bool prepare();
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+    TII = MF.getSubtarget<R600Subtarget>().getInstrInfo();
     TRI = &TII->getRegisterInfo();
     DEBUG(MF.dump(););
     OrderedBlks.clear();
Index: lib/Target/AMDGPU/CMakeLists.txt
===================================================================
--- lib/Target/AMDGPU/CMakeLists.txt
+++ lib/Target/AMDGPU/CMakeLists.txt
@@ -52,6 +52,7 @@
   R600ControlFlowFinalizer.cpp
   R600EmitClauseMarkers.cpp
   R600ExpandSpecialInstrs.cpp
+  R600FrameLowering.cpp
   R600InstrInfo.cpp
   R600ISelLowering.cpp
   R600MachineFunctionInfo.cpp
Index: lib/Target/AMDGPU/GCNHazardRecognizer.h
===================================================================
--- lib/Target/AMDGPU/GCNHazardRecognizer.h
+++ lib/Target/AMDGPU/GCNHazardRecognizer.h
@@ -24,15 +24,16 @@
 class MachineInstr;
 class ScheduleDAG;
 class SIInstrInfo;
+class SISubtarget;
 
 class GCNHazardRecognizer final : public ScheduleHazardRecognizer {
-
-  // This variable stores the instruction that has been emitted this cycle.
-  // It will be added to EmittedInstrs, when AdvanceCycle() or RecedeCycle() is
+  // This variable stores the instruction that has been emitted this cycle. It
+  // will be added to EmittedInstrs, when AdvanceCycle() or RecedeCycle() is
   // called.
   MachineInstr *CurrCycleInstr;
   std::list<MachineInstr*> EmittedInstrs;
   const MachineFunction &MF;
+  const SISubtarget &ST;
 
   int getWaitStatesSinceDef(unsigned Reg,
                             function_ref<bool(MachineInstr *)> IsHazardDef =
Index: lib/Target/AMDGPU/GCNHazardRecognizer.cpp
===================================================================
--- lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -25,7 +25,8 @@
 
 GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
   CurrCycleInstr(nullptr),
-  MF(MF) {
+  MF(MF),
+  ST(MF.getSubtarget<SISubtarget>()) {
   MaxLookAhead = 5;
 }
 
@@ -81,8 +82,7 @@
   if (!CurrCycleInstr)
     return;
 
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+  const SIInstrInfo *TII = ST.getInstrInfo();
   unsigned NumWaitStates = TII->getNumWaitStates(*CurrCycleInstr);
 
   // Keep track of emitted instructions
@@ -114,8 +114,7 @@
 
 int GCNHazardRecognizer::getWaitStatesSinceDef(
     unsigned Reg, function_ref<bool(MachineInstr *)> IsHazardDef) {
-  const TargetRegisterInfo *TRI =
-      MF.getSubtarget().getRegisterInfo();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
 
   int WaitStates = -1;
   for (MachineInstr *MI : EmittedInstrs) {
@@ -141,10 +140,8 @@
 }
 
 int GCNHazardRecognizer::checkSMEMSoftClauseHazards(MachineInstr *SMEM) {
-  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
-
   // SMEM soft clause are only present on VI+
-  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
+  if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
     return 0;
 
   // A soft-clause is any group of consecutive SMEM instructions.  The
@@ -198,14 +195,14 @@
 }
 
 int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
-  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
-  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(ST.getInstrInfo());
+  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+  const SIInstrInfo *TII = ST.getInstrInfo();
   int WaitStatesNeeded = 0;
 
   WaitStatesNeeded = checkSMEMSoftClauseHazards(SMRD);
 
   // This SMRD hazard only affects SI.
-  if (ST.getGeneration() != AMDGPUSubtarget::SOUTHERN_ISLANDS)
+  if (ST.getGeneration() != SISubtarget::SOUTHERN_ISLANDS)
     return WaitStatesNeeded;
 
   // A read of an SGPR by SMRD instruction requires 4 wait states when the
@@ -224,10 +221,9 @@
 }
 
 int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
-  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
-  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(ST.getInstrInfo());
+  const SIInstrInfo *TII = ST.getInstrInfo();
 
-  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
+  if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
     return 0;
 
   const SIRegisterInfo &TRI = TII->getRegisterInfo();
@@ -250,9 +246,7 @@
 }
 
 int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
-  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo *>(ST.getRegisterInfo());
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
 
   // Check for DPP VGPR read after VALU VGPR write.
   int DppVgprWaitStates = 2;
Index: lib/Target/AMDGPU/R600ClauseMergePass.cpp
===================================================================
--- lib/Target/AMDGPU/R600ClauseMergePass.cpp
+++ lib/Target/AMDGPU/R600ClauseMergePass.cpp
@@ -171,7 +171,9 @@
   if (skipFunction(*MF.getFunction()))
     return false;
 
-  TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+  TII = ST.getInstrInfo();
+
   for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                   BB != BB_E; ++BB) {
     MachineBasicBlock &MBB = *BB;
Index: lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
===================================================================
--- lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -39,14 +39,14 @@
     FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
   };
 
-  const AMDGPUSubtarget *ST;
+  const R600Subtarget *ST;
   std::vector<StackItem> BranchStack;
   std::vector<StackItem> LoopStack;
   unsigned MaxStackSize;
   unsigned CurrentEntries;
   unsigned CurrentSubEntries;
 
-  CFStack(const AMDGPUSubtarget *st, CallingConv::ID cc) : ST(st),
+  CFStack(const R600Subtarget *st, CallingConv::ID cc) : ST(st),
       // We need to reserve a stack entry for CALL_FS in vertex shaders.
       MaxStackSize(cc == CallingConv::AMDGPU_VS ? 1 : 0),
       CurrentEntries(0), CurrentSubEntries(0) { }
@@ -119,7 +119,7 @@
       return 0;
   case CFStack::FIRST_NON_WQM_PUSH:
     assert(!ST->hasCaymanISA());
-    if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
+    if (ST->getGeneration() <= R600Subtarget::R700) {
       // +1 For the push operation.
       // +2 Extra space required.
       return 3;
@@ -132,7 +132,7 @@
       return 2;
     }
  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
-    assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+    assert(ST->getGeneration() >= R600Subtarget::EVERGREEN);
     // +1 For the push operation.
     // +1 Extra space required.
     return 2;
@@ -159,7 +159,7 @@
     // See comment in
     // CFStack::getSubEntrySize()
     else if (CurrentEntries > 0 &&
-             ST->getGeneration() > AMDGPUSubtarget::EVERGREEN &&
+             ST->getGeneration() > R600Subtarget::EVERGREEN &&
             !ST->hasCaymanISA() &&
             !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
       Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
@@ -220,7 +220,7 @@
   const R600InstrInfo *TII;
   const R600RegisterInfo *TRI;
   unsigned MaxFetchInst;
-  const AMDGPUSubtarget *ST;
+  const R600Subtarget *ST;
 
   bool IsTrivialInst(MachineInstr *MI) const {
     switch (MI->getOpcode()) {
@@ -234,7 +234,7 @@
 
   const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
     unsigned Opcode = 0;
-    bool isEg = (ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+    bool isEg = (ST->getGeneration() >= R600Subtarget::EVERGREEN);
     switch (CFI) {
     case CF_TC:
       Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
@@ -491,10 +491,11 @@
     : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), ST(nullptr) {}
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    ST = &MF.getSubtarget<AMDGPUSubtarget>();
+    ST = &MF.getSubtarget<R600Subtarget>();
     MaxFetchInst = ST->getTexVTXClauseSize();
-    TII = static_cast<const R600InstrInfo *>(ST->getInstrInfo());
-    TRI = static_cast<const R600RegisterInfo *>(ST->getRegisterInfo());
+    TII = ST->getInstrInfo();
+    TRI = ST->getRegisterInfo();
+
     R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
 
     CFStack CFStack(ST, MF.getFunction()->getCallingConv());
Index: lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
===================================================================
--- lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
+++ lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
@@ -298,7 +298,8 @@
   }
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+    const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+    TII = ST.getInstrInfo();
 
     for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                     BB != BB_E; ++BB) {
Index: lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
===================================================================
--- lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
+++ lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
@@ -29,7 +29,6 @@
 
 namespace {
 
 class R600ExpandSpecialInstrsPass : public MachineFunctionPass {
-
 private:
   static char ID;
   const R600InstrInfo *TII;
@@ -66,7 +65,8 @@
 }
 
 bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
-  TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+  TII = ST.getInstrInfo();
 
   const R600RegisterInfo &TRI = TII->getRegisterInfo();
 
Index: lib/Target/AMDGPU/R600FrameLowering.h
===================================================================
--- /dev/null
+++ lib/Target/AMDGPU/R600FrameLowering.h
@@ -0,0 +1,30 @@
+//===--------------------- R600FrameLowering.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_AMDGPU_R600FRAMELOWERING_H +#define LLVM_LIB_TARGET_AMDGPU_R600FRAMELOWERING_H + +#include "AMDGPUFrameLowering.h" + +namespace llvm { + +class R600FrameLowering : public AMDGPUFrameLowering { +public: + R600FrameLowering(StackDirection D, unsigned StackAl, int LAO, + unsigned TransAl = 1) : + AMDGPUFrameLowering(D, StackAl, LAO, TransAl) {} + virtual ~R600FrameLowering(); + + void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const {} + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {} +}; + +} + +#endif Index: lib/Target/AMDGPU/R600FrameLowering.cpp =================================================================== --- /dev/null +++ lib/Target/AMDGPU/R600FrameLowering.cpp @@ -0,0 +1,15 @@ +//===----------------------- R600FrameLowering.cpp ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//==-----------------------------------------------------------------------===// + +#include "R600FrameLowering.h" + +using namespace llvm; + +R600FrameLowering::~R600FrameLowering() { +} Index: lib/Target/AMDGPU/R600ISelLowering.h =================================================================== --- lib/Target/AMDGPU/R600ISelLowering.h +++ lib/Target/AMDGPU/R600ISelLowering.h @@ -20,10 +20,14 @@ namespace llvm { class R600InstrInfo; +class R600Subtarget; class R600TargetLowering final : public AMDGPUTargetLowering { public: - R600TargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI); + R600TargetLowering(const TargetMachine &TM, const R600Subtarget &STI); + + const R600Subtarget *getSubtarget() const; + MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock * BB) const override; SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; @@ -86,6 +90,10 @@ bool isHWTrueValue(SDValue Op) const; bool isHWFalseValue(SDValue Op) const; + bool FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, + SDValue &Neg, SDValue &Abs, SDValue &Sel, SDValue &Imm, + SelectionDAG &DAG) const; + SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override; }; Index: lib/Target/AMDGPU/R600ISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/R600ISelLowering.cpp +++ lib/Target/AMDGPU/R600ISelLowering.cpp @@ -30,8 +30,8 @@ using namespace llvm; -R600TargetLowering::R600TargetLowering(TargetMachine &TM, - const AMDGPUSubtarget &STI) +R600TargetLowering::R600TargetLowering(const TargetMachine &TM, + const R600Subtarget &STI) : AMDGPUTargetLowering(TM, STI), Gen(STI.getGeneration()) { addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass); addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass); @@ -199,6 +199,10 @@ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); } +const R600Subtarget *R600TargetLowering::getSubtarget() const { + return static_cast(Subtarget); +} + static inline bool isEOP(MachineBasicBlock::iterator I) { return std::next(I)->getOpcode() == AMDGPU::RETURN; } @@ -208,8 +212,7 @@ MachineFunction * MF = BB->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); MachineBasicBlock::iterator I = *MI; - const R600InstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const R600InstrInfo *TII = getSubtarget()->getInstrInfo(); switch (MI->getOpcode()) { default: @@ -966,7 +969,7 @@ SDValue TrigVal 
= DAG.getNode(TrigNode, DL, VT, DAG.getNode(ISD::FADD, DL, VT, FractPart, DAG.getConstantFP(-0.5, DL, MVT::f32))); - if (Gen >= AMDGPUSubtarget::R700) + if (Gen >= R600Subtarget::R700) return TrigVal; // On R600 hw, COS/SIN input must be between -Pi and Pi. return DAG.getNode(ISD::FMUL, DL, VT, TrigVal, @@ -1439,8 +1442,7 @@ // Lowering for indirect addressing const MachineFunction &MF = DAG.getMachineFunction(); - const AMDGPUFrameLowering *TFL = - static_cast(Subtarget->getFrameLowering()); + const R600FrameLowering *TFL = getSubtarget()->getFrameLowering(); unsigned StackWidth = TFL->getStackWidth(MF); Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG); @@ -1677,8 +1679,7 @@ // Lowering for indirect addressing const MachineFunction &MF = DAG.getMachineFunction(); - const AMDGPUFrameLowering *TFL = - static_cast(Subtarget->getFrameLowering()); + const R600FrameLowering *TFL = getSubtarget()->getFrameLowering(); unsigned StackWidth = TFL->getStackWidth(MF); Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG); @@ -1731,7 +1732,7 @@ SDValue R600TargetLowering::lowerFrameIndex(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); - const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering(); + const R600FrameLowering *TFL = getSubtarget()->getFrameLowering(); FrameIndexSDNode *FIN = cast(Op); @@ -2179,13 +2180,14 @@ return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); } -static bool -FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg, - SDValue &Abs, SDValue &Sel, SDValue &Imm, SelectionDAG &DAG) { - const R600InstrInfo *TII = - static_cast(DAG.getSubtarget().getInstrInfo()); +bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx, + SDValue &Src, SDValue &Neg, SDValue &Abs, + SDValue &Sel, SDValue &Imm, + SelectionDAG &DAG) const { + const R600InstrInfo *TII = getSubtarget()->getInstrInfo(); if (!Src.isMachineOpcode()) return false; + switch (Src.getMachineOpcode()) { case AMDGPU::FNEG_R600: if (!Neg.getNode()) @@ -2310,14 +2312,13 @@ } } - /// \brief Fold the instructions after selecting them SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node, SelectionDAG &DAG) const { - const R600InstrInfo *TII = - static_cast(DAG.getSubtarget().getInstrInfo()); + const R600InstrInfo *TII = getSubtarget()->getInstrInfo(); if (!Node->isMachineOpcode()) return Node; + unsigned Opcode = Node->getMachineOpcode(); SDValue FakeOp; Index: lib/Target/AMDGPU/R600InstrInfo.h =================================================================== --- lib/Target/AMDGPU/R600InstrInfo.h +++ lib/Target/AMDGPU/R600InstrInfo.h @@ -16,23 +16,25 @@ #define LLVM_LIB_TARGET_AMDGPU_R600INSTRINFO_H #include "AMDGPUInstrInfo.h" -#include "R600Defines.h" #include "R600RegisterInfo.h" namespace llvm { - class AMDGPUTargetMachine; - class DFAPacketizer; - class MachineFunction; - class MachineInstr; - class MachineInstrBuilder; - - class R600InstrInfo final : public AMDGPUInstrInfo { - private: +class AMDGPUTargetMachine; +class DFAPacketizer; +class MachineFunction; +class MachineInstr; +class MachineInstrBuilder; +class R600Subtarget; + +class R600InstrInfo final : public AMDGPUInstrInfo { +private: const R600RegisterInfo RI; + const R600Subtarget &ST; - std::vector > - ExtractSrcs(MachineInstr *MI, const DenseMap &PV, unsigned &ConstCount) const; - + std::vector> + ExtractSrcs(MachineInstr *MI, + const DenseMap &PV, + unsigned &ConstCount) const; MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, @@ -41,11 
+43,11 @@ unsigned AddrChan) const; MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB, - MachineBasicBlock::iterator I, - unsigned ValueReg, unsigned Address, - unsigned OffsetReg, - unsigned AddrChan) const; - public: + MachineBasicBlock::iterator I, + unsigned ValueReg, unsigned Address, + unsigned OffsetReg, + unsigned AddrChan) const; +public: enum BankSwizzle { ALU_VEC_012_SCL_210 = 0, ALU_VEC_021_SCL_122, @@ -55,9 +57,12 @@ ALU_VEC_210 }; - explicit R600InstrInfo(const AMDGPUSubtarget &st); + explicit R600InstrInfo(const R600Subtarget &); + + const R600RegisterInfo &getRegisterInfo() const { + return RI; + } - const R600RegisterInfo &getRegisterInfo() const override; void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override; @@ -109,7 +114,7 @@ /// If register is ALU_LITERAL, second member is IMM. /// Otherwise, second member value is undefined. SmallVector, 3> - getSrcs(MachineInstr *MI) const; + getSrcs(MachineInstr *MI) const; unsigned isLegalUpTo( const std::vector > > &IGSrcs, @@ -153,10 +158,14 @@ DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const override; - bool ReverseBranchCondition(SmallVectorImpl &Cond) const override; + bool ReverseBranchCondition( + SmallVectorImpl &Cond) const override; - bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, - SmallVectorImpl &Cond, bool AllowModify) const override; + bool AnalyzeBranch(MachineBasicBlock &MBB, + MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify) const override; unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef Cond, @@ -168,20 +177,18 @@ bool isPredicable(MachineInstr &MI) const override; - bool - isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles, - BranchProbability Probability) const override; + bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles, + BranchProbability Probability) const override; bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles, unsigned ExtraPredCycles, BranchProbability Probability) const override ; - bool - isProfitableToIfCvt(MachineBasicBlock &TMBB, - unsigned NumTCycles, unsigned ExtraTCycles, - MachineBasicBlock &FMBB, - unsigned NumFCycles, unsigned ExtraFCycles, - BranchProbability Probability) const override; + bool isProfitableToIfCvt(MachineBasicBlock &TMBB, + unsigned NumTCycles, unsigned ExtraTCycles, + MachineBasicBlock &FMBB, + unsigned NumFCycles, unsigned ExtraFCycles, + BranchProbability Probability) const override; bool DefinesPredicate(MachineInstr &MI, std::vector &Pred) const override; @@ -190,7 +197,7 @@ ArrayRef Pred2) const override; bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, - MachineBasicBlock &FMBB) const override; + MachineBasicBlock &FMBB) const override; bool PredicateInstruction(MachineInstr &MI, ArrayRef Pred) const override; @@ -240,10 +247,10 @@ unsigned getMaxAlusPerClause() const; - ///buildDefaultInstruction - This function returns a MachineInstr with - /// all the instruction modifiers initialized to their default values. - /// You can use this function to avoid manually specifying each instruction - /// modifier operand when building a new instruction. + /// buildDefaultInstruction - This function returns a MachineInstr with all + /// the instruction modifiers initialized to their default values. 
You can + /// use this function to avoid manually specifying each instruction modifier + /// operand when building a new instruction. /// /// \returns a MachineInstr with all the instruction modifiers initialized /// to their default values. @@ -260,9 +267,9 @@ unsigned DstReg) const; MachineInstr *buildMovImm(MachineBasicBlock &BB, - MachineBasicBlock::iterator I, - unsigned DstReg, - uint64_t Imm) const; + MachineBasicBlock::iterator I, + unsigned DstReg, + uint64_t Imm) const; MachineInstr *buildMovInstr(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, @@ -303,7 +310,6 @@ // Helper functions that check the opcode for status information bool isRegisterStore(const MachineInstr &MI) const; bool isRegisterLoad(const MachineInstr &MI) const; - }; namespace AMDGPU { Index: lib/Target/AMDGPU/R600InstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/R600InstrInfo.cpp +++ lib/Target/AMDGPU/R600InstrInfo.cpp @@ -28,12 +28,8 @@ #define GET_INSTRINFO_CTOR_DTOR #include "AMDGPUGenDFAPacketizer.inc" -R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st) - : AMDGPUInstrInfo(st), RI() {} - -const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const { - return RI; -} +R600InstrInfo::R600InstrInfo(const R600Subtarget &ST) + : AMDGPUInstrInfo(ST), RI(), ST(ST) {} bool R600InstrInfo::isTrig(const MachineInstr &MI) const { return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG; @@ -90,10 +86,9 @@ } bool R600InstrInfo::isMov(unsigned Opcode) const { - - switch(Opcode) { - default: return false; + default: + return false; case AMDGPU::MOV: case AMDGPU::MOV_IMM_F32: case AMDGPU::MOV_IMM_I32: @@ -651,7 +646,7 @@ DFAPacketizer * R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const { const InstrItineraryData *II = STI.getInstrItineraryData(); - return static_cast(STI).createDFAPacketizer(II); + return static_cast(STI).createDFAPacketizer(II); } static bool @@ -1113,8 +1108,8 @@ void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved, const MachineFunction &MF) const { - const AMDGPUFrameLowering *TFL = static_cast( - MF.getSubtarget().getFrameLowering()); + const R600Subtarget &ST = MF.getSubtarget(); + const R600FrameLowering *TFL = ST.getFrameLowering(); unsigned StackWidth = TFL->getStackWidth(MF); int End = getIndirectIndexEnd(MF); @@ -1290,7 +1285,7 @@ const { assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented"); unsigned Opcode; - if (ST.getGeneration() <= AMDGPUSubtarget::R700) + if (ST.getGeneration() <= R600Subtarget::R700) Opcode = AMDGPU::DOT4_r600; else Opcode = AMDGPU::DOT4_eg; Index: lib/Target/AMDGPU/R600Instructions.td =================================================================== --- lib/Target/AMDGPU/R600Instructions.td +++ lib/Target/AMDGPU/R600Instructions.td @@ -336,11 +336,11 @@ def load_param_exti8 : LoadParamFrag; def load_param_exti16 : LoadParamFrag; -def isR600 : Predicate<"Subtarget->getGeneration() <= AMDGPUSubtarget::R700">; +def isR600 : Predicate<"Subtarget->getGeneration() <= R600Subtarget::R700">; def isR600toCayman : Predicate< - "Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">; + "Subtarget->getGeneration() <= R600Subtarget::NORTHERN_ISLANDS">; //===----------------------------------------------------------------------===// // R600 SDNodes Index: lib/Target/AMDGPU/R600MachineScheduler.h =================================================================== --- lib/Target/AMDGPU/R600MachineScheduler.h +++ lib/Target/AMDGPU/R600MachineScheduler.h @@ 
-15,15 +15,16 @@ #ifndef LLVM_LIB_TARGET_AMDGPU_R600MACHINESCHEDULER_H #define LLVM_LIB_TARGET_AMDGPU_R600MACHINESCHEDULER_H -#include "R600InstrInfo.h" #include "llvm/CodeGen/MachineScheduler.h" using namespace llvm; namespace llvm { -class R600SchedStrategy final : public MachineSchedStrategy { +class R600InstrInfo; +struct R600RegisterInfo; +class R600SchedStrategy final : public MachineSchedStrategy { const ScheduleDAGMILive *DAG; const R600InstrInfo *TII; const R600RegisterInfo *TRI; Index: lib/Target/AMDGPU/R600MachineScheduler.cpp =================================================================== --- lib/Target/AMDGPU/R600MachineScheduler.cpp +++ lib/Target/AMDGPU/R600MachineScheduler.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "R600MachineScheduler.h" +#include "R600InstrInfo.h" #include "AMDGPUSubtarget.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Pass.h" @@ -26,7 +27,7 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) { assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness"); DAG = static_cast(dag); - const AMDGPUSubtarget &ST = DAG->MF.getSubtarget(); + const R600Subtarget &ST = DAG->MF.getSubtarget(); TII = static_cast(DAG->TII); TRI = static_cast(DAG->TRI); VLIW5 = !ST.hasCaymanISA(); @@ -48,8 +49,7 @@ QSrc.clear(); } -static -unsigned getWFCountLimitedByGPR(unsigned GPRCount) { +static unsigned getWFCountLimitedByGPR(unsigned GPRCount) { assert (GPRCount && "GPRCount cannot be 0"); return 248 / GPRCount; } @@ -349,7 +349,7 @@ DEBUG(dbgs() << "New Slot\n"); assert (OccupedSlotsMask && "Slot wasn't filled"); OccupedSlotsMask = 0; -// if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS) +// if (HwGen == R600Subtarget::NORTHERN_ISLANDS) // OccupedSlotsMask |= 16; InstructionsGroupCandidate.clear(); LoadAlu(); Index: lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp =================================================================== --- lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp +++ lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp @@ -29,6 +29,7 @@ #include "AMDGPU.h" #include "AMDGPUSubtarget.h" +#include "R600Defines.h" #include "R600InstrInfo.h" #include "llvm/CodeGen/DFAPacketizer.h" #include "llvm/CodeGen/MachineDominators.h" @@ -317,8 +318,10 @@ if (skipFunction(*Fn.getFunction())) return false; - TII = static_cast(Fn.getSubtarget().getInstrInfo()); - MRI = &(Fn.getRegInfo()); + const R600Subtarget &ST = Fn.getSubtarget(); + TII = ST.getInstrInfo(); + MRI = &Fn.getRegInfo(); + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); MBB != MBBe; ++MBB) { MachineBasicBlock *MB = &*MBB; Index: lib/Target/AMDGPU/R600Packetizer.cpp =================================================================== --- lib/Target/AMDGPU/R600Packetizer.cpp +++ lib/Target/AMDGPU/R600Packetizer.cpp @@ -56,7 +56,6 @@ char R600Packetizer::ID = 0; class R600PacketizerList : public VLIWPacketizerList { - private: const R600InstrInfo *TII; const R600RegisterInfo &TRI; @@ -148,12 +147,12 @@ } public: // Ctor. - R600PacketizerList(MachineFunction &MF, MachineLoopInfo &MLI) + R600PacketizerList(MachineFunction &MF, const R600Subtarget &ST, + MachineLoopInfo &MLI) : VLIWPacketizerList(MF, MLI, nullptr), - TII(static_cast( - MF.getSubtarget().getInstrInfo())), + TII(ST.getInstrInfo()), TRI(TII->getRegisterInfo()) { - VLIW5 = !MF.getSubtarget().hasCaymanISA(); + VLIW5 = !ST.hasCaymanISA(); } // initPacketizerState - initialize some internal flags. 
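// NOTE (editor): getWFCountLimitedByGPR() in the R600MachineScheduler.cpp hunk above caps wavefront occupancy by dividing a fixed register budget (248) by the per-thread GPR count. A self-contained sketch of that arithmetic; the helper name and the hardware wavefront-cap parameter are illustrative, not part of the patch:
#include <algorithm>
#include <cassert>

static unsigned wavefrontsLimitedByGPRs(unsigned GPRBudget,
                                        unsigned GPRsPerThread,
                                        unsigned HWWavefrontCap) {
  assert(GPRsPerThread && "GPRCount cannot be 0");
  // Integer division: the more GPRs each thread keeps live, the fewer
  // wavefronts fit on the machine at once.
  return std::min(GPRBudget / GPRsPerThread, HWWavefrontCap);
}
// e.g. wavefrontsLimitedByGPRs(248, 31, 16) == 8: at 31 live GPRs per thread,
// only eight wavefronts fit within the 248-register budget.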
@@ -327,11 +326,13 @@ }; bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { - const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo(); + const R600Subtarget &ST = Fn.getSubtarget(); + const R600InstrInfo *TII = ST.getInstrInfo(); + MachineLoopInfo &MLI = getAnalysis(); // Instantiate the packetizer. - R600PacketizerList Packetizer(Fn, MLI); + R600PacketizerList Packetizer(Fn, ST, MLI); // DFA state table should not be empty. assert(Packetizer.getResourceTracker() && "Empty DFA table!"); Index: lib/Target/AMDGPU/R600RegisterInfo.h =================================================================== --- lib/Target/AMDGPU/R600RegisterInfo.h +++ lib/Target/AMDGPU/R600RegisterInfo.h @@ -31,7 +31,7 @@ /// \brief get the HW encoding for a register's channel. unsigned getHWRegChan(unsigned reg) const; - unsigned getHWRegIndex(unsigned Reg) const override; + unsigned getHWRegIndex(unsigned Reg) const; /// \brief get the register class of the specified type to use in the /// CFGStructurizer Index: lib/Target/AMDGPU/R600RegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/R600RegisterInfo.cpp +++ lib/Target/AMDGPU/R600RegisterInfo.cpp @@ -28,8 +28,8 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const { BitVector Reserved(getNumRegs()); - const R600InstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); + const R600Subtarget &ST = MF.getSubtarget(); + const R600InstrInfo *TII = ST.getInstrInfo(); Reserved.set(AMDGPU::ZERO); Reserved.set(AMDGPU::HALF); Index: lib/Target/AMDGPU/SIDebuggerInsertNops.cpp =================================================================== --- lib/Target/AMDGPU/SIDebuggerInsertNops.cpp +++ lib/Target/AMDGPU/SIDebuggerInsertNops.cpp @@ -20,6 +20,7 @@ //===----------------------------------------------------------------------===// #include "SIInstrInfo.h" +#include "AMDGPUSubtarget.h" #include "llvm/ADT/DenseSet.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" @@ -61,7 +62,7 @@ bool SIDebuggerInsertNops::runOnMachineFunction(MachineFunction &MF) { // Skip this pass if "amdgpu-debugger-insert-nops" attribute was not // specified. - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SISubtarget &ST = MF.getSubtarget(); if (!ST.debuggerInsertNops()) return false; @@ -70,8 +71,7 @@ return false; // Target instruction info. - const SIInstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); + const SIInstrInfo *TII = ST.getInstrInfo(); // Set containing line numbers that have nop inserted. 
DenseSet NopInserted; Index: lib/Target/AMDGPU/SIDefines.h =================================================================== --- lib/Target/AMDGPU/SIDefines.h +++ lib/Target/AMDGPU/SIDefines.h @@ -48,7 +48,7 @@ namespace AMDGPU { enum OperandType { /// Operand with register or 32-bit immediate - OPERAND_REG_IMM32 = llvm::MCOI::OPERAND_FIRST_TARGET, + OPERAND_REG_IMM32 = MCOI::OPERAND_FIRST_TARGET, /// Operand with register or inline constant OPERAND_REG_INLINE_C }; Index: lib/Target/AMDGPU/SIFixSGPRCopies.cpp =================================================================== --- lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -237,11 +237,10 @@ } bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) { + const SISubtarget &ST = MF.getSubtarget(); MachineRegisterInfo &MRI = MF.getRegInfo(); - const SIRegisterInfo *TRI = - static_cast(MF.getSubtarget().getRegisterInfo()); - const SIInstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + const SIInstrInfo *TII = ST.getInstrInfo(); SmallVector Worklist; Index: lib/Target/AMDGPU/SIFoldOperands.cpp =================================================================== --- lib/Target/AMDGPU/SIFoldOperands.cpp +++ lib/Target/AMDGPU/SIFoldOperands.cpp @@ -16,8 +16,6 @@ #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/LLVMContext.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" @@ -298,9 +296,10 @@ if (skipFunction(*MF.getFunction())) return false; + const SISubtarget &ST = MF.getSubtarget(); + MachineRegisterInfo &MRI = MF.getRegInfo(); - const SIInstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); + const SIInstrInfo *TII = ST.getInstrInfo(); const SIRegisterInfo &TRI = TII->getRegisterInfo(); for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); Index: lib/Target/AMDGPU/SIFrameLowering.h =================================================================== --- lib/Target/AMDGPU/SIFrameLowering.h +++ lib/Target/AMDGPU/SIFrameLowering.h @@ -23,6 +23,8 @@ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override; + void emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const override; void processFunctionBeforeFrameFinalized( MachineFunction &MF, Index: lib/Target/AMDGPU/SIFrameLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIFrameLowering.cpp +++ lib/Target/AMDGPU/SIFrameLowering.cpp @@ -11,6 +11,8 @@ #include "SIInstrInfo.h" #include "SIMachineFunctionInfo.h" #include "SIRegisterInfo.h" +#include "AMDGPUSubtarget.h" + #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -52,10 +54,9 @@ if (hasOnlySGPRSpills(MFI, MF.getFrameInfo())) return; - const SIInstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); + const SISubtarget &ST = MF.getSubtarget(); + const SIInstrInfo *TII = ST.getInstrInfo(); const SIRegisterInfo *TRI = &TII->getRegisterInfo(); - const AMDGPUSubtarget &ST = MF.getSubtarget(); MachineRegisterInfo &MRI = MF.getRegInfo(); MachineBasicBlock::iterator I = MBB.begin(); @@ -263,6 +264,11 @@ } } +void SIFrameLowering::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + +} + void 
SIFrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { Index: lib/Target/AMDGPU/SIISelLowering.h =================================================================== --- lib/Target/AMDGPU/SIISelLowering.h +++ lib/Target/AMDGPU/SIISelLowering.h @@ -71,7 +71,9 @@ bool isCFIntrinsic(const SDNode *Intr) const; public: - SITargetLowering(TargetMachine &tm, const AMDGPUSubtarget &STI); + SITargetLowering(const TargetMachine &tm, const SISubtarget &STI); + + const SISubtarget *getSubtarget() const; bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, unsigned IntrinsicID) const override; Index: lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIISelLowering.cpp +++ lib/Target/AMDGPU/SIISelLowering.cpp @@ -52,8 +52,8 @@ llvm_unreachable("Cannot allocate sgpr"); } -SITargetLowering::SITargetLowering(TargetMachine &TM, - const AMDGPUSubtarget &STI) +SITargetLowering::SITargetLowering(const TargetMachine &TM, + const SISubtarget &STI) : AMDGPUTargetLowering(TM, STI) { addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); @@ -190,7 +190,7 @@ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); - if (Subtarget->hasFlatAddressSpace()) { + if (getSubtarget()->hasFlatAddressSpace()) { setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); } @@ -205,7 +205,7 @@ setOperationAction(ISD::FMINNUM, MVT::f64, Legal); setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); - if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) { + if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) { setOperationAction(ISD::FTRUNC, MVT::f64, Legal); setOperationAction(ISD::FCEIL, MVT::f64, Legal); setOperationAction(ISD::FRINT, MVT::f64, Legal); @@ -255,6 +255,10 @@ setSchedulingPreference(Sched::RegPressure); } +const SISubtarget *SITargetLowering::getSubtarget() const { + return static_cast(Subtarget); +} + //===----------------------------------------------------------------------===// // TargetLowering queries //===----------------------------------------------------------------------===// @@ -335,7 +339,7 @@ switch (AS) { case AMDGPUAS::GLOBAL_ADDRESS: { - if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { // Assume that we will use FLAT for all global memory accesses // on VI. // FIXME: This assumption is currently wrong. On VI we still use @@ -363,16 +367,16 @@ if (DL.getTypeStoreSize(Ty) < 4) return isLegalMUBUFAddressingMode(AM); - if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { + if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { // SMRD instructions have an 8-bit, dword offset on SI. if (!isUInt<8>(AM.BaseOffs / 4)) return false; - } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { + } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) { // On CI+, this can also be a 32-bit literal constant offset. If it fits // in 8-bits, it can use a smaller encoding.
if (!isUInt<32>(AM.BaseOffs / 4)) return false; - } else if (Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) { + } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) { // On VI, these use the SMEM format and the offset is 20-bit in bytes. if (!isUInt<20>(AM.BaseOffs)) return false; @@ -519,8 +523,7 @@ bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const { - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); return TII->isInlineConstant(Imm); } @@ -539,8 +542,7 @@ unsigned Offset) const { const DataLayout &DL = DAG.getDataLayout(); MachineFunction &MF = DAG.getMachineFunction(); - const SIRegisterInfo *TRI = - static_cast(Subtarget->getRegisterInfo()); + const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); @@ -579,13 +581,12 @@ SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl &InVals) const { - const SIRegisterInfo *TRI = - static_cast(Subtarget->getRegisterInfo()); + const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); MachineFunction &MF = DAG.getMachineFunction(); FunctionType *FType = MF.getFunction()->getFunctionType(); SIMachineFunctionInfo *Info = MF.getInfo(); - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SISubtarget &ST = MF.getSubtarget(); if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { const Function *Fn = MF.getFunction(); @@ -740,7 +741,7 @@ auto *ParamTy = dyn_cast(FType->getParamType(Ins[i].getOrigArgIndex())); - if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && + if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { // On SI local pointers are just offsets into LDS, so they are always // less than 16-bits. 
On CI and newer they could potentially be @@ -1030,7 +1031,7 @@ } - if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && + if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { report_fatal_error(Twine("invalid register \"" + StringRef(RegName) + "\" for subtarget.")); @@ -1062,8 +1063,7 @@ MachineInstr *MI, MachineBasicBlock *BB) const { switch (MI->getOpcode()) { case AMDGPU::SI_INIT_M0: { - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); BuildMI(*BB, MI->getIterator(), MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) .addOperand(MI->getOperand(0)); @@ -1073,8 +1073,8 @@ case AMDGPU::BRANCH: return BB; case AMDGPU::GET_GROUPSTATICSIZE: { - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); + MachineFunction *MF = BB->getParent(); SIMachineFunctionInfo *MFI = MF->getInfo(); DebugLoc DL = MI->getDebugLoc(); @@ -1522,8 +1522,7 @@ SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); auto MFI = MF.getInfo(); - const SIRegisterInfo *TRI = - static_cast(Subtarget->getRegisterInfo()); + const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); EVT VT = Op.getValueType(); SDLoc DL(Op); @@ -1562,14 +1561,14 @@ case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); case Intrinsic::amdgcn_rsq_legacy: { - if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) + if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) return emitRemovedIntrinsicError(DAG, DL, VT); return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); } case Intrinsic::amdgcn_rsq_clamp: case AMDGPUIntrinsic::AMDGPU_rsq_clamped: { // Legacy name - if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) + if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); Type *Type = VT.getTypeForEVT(*DAG.getContext()); @@ -1730,7 +1729,7 @@ return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); case Intrinsic::amdgcn_log_clamp: { - if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) + if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) return SDValue(); DiagnosticInfoUnsupported BadIntrin( @@ -2129,7 +2128,7 @@ SDValue Scale; - if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { + if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { // Workaround a hardware bug on SI where the condition output from div_scale // is not usable. @@ -2389,7 +2388,7 @@ /// \brief Return true if the given offset Size in bytes can be folded into /// the immediate offsets of a memory instruction for the given address space. static bool canFoldOffset(unsigned OffsetSize, unsigned AS, - const AMDGPUSubtarget &STI) { + const SISubtarget &STI) { switch (AS) { case AMDGPUAS::GLOBAL_ADDRESS: { // MUBUF instructions have a 12-bit offset in bytes. @@ -2398,7 +2397,7 @@ case AMDGPUAS::CONSTANT_ADDRESS: { // SMRD instructions have an 8-bit offset in dwords on SI and // a 20-bit offset in bytes on VI.
- if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS + if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS return isUInt<20>(OffsetSize); else return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4); @@ -2449,7 +2448,7 @@ // If the resulting offset is too large, we can't fold it into the addressing // mode offset. APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); - if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *Subtarget)) + if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget())) return SDValue(); SelectionDAG &DAG = DCI.DAG; @@ -3013,9 +3012,7 @@ /// Returns -1 if it isn't an immediate, 0 if it's an inline immediate /// and the immediate value if it's a literal immediate int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const { - - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); if (const ConstantSDNode *Node = dyn_cast(N)) { if (TII->isInlineConstant(Node->getAPIntValue())) @@ -3163,8 +3160,7 @@ /// \brief Fold the instructions after selecting them. SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, SelectionDAG &DAG) const { - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); unsigned Opcode = Node->getMachineOpcode(); if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore()) @@ -3182,8 +3178,7 @@ /// bits set in the writemask void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const { - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); @@ -3260,8 +3255,7 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr) const { - const SIInstrInfo *TII = - static_cast(Subtarget->getInstrInfo()); + const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); // Build the half of the subregister with the constants before building the // full 128-bit register. If we are building multiple resource descriptors, Index: lib/Target/AMDGPU/SIInsertWaits.cpp =================================================================== --- lib/Target/AMDGPU/SIInsertWaits.cpp +++ lib/Target/AMDGPU/SIInsertWaits.cpp @@ -55,6 +55,7 @@ class SIInsertWaits : public MachineFunctionPass { private: + const SISubtarget *ST; const SIInstrInfo *TII; const SIRegisterInfo *TRI; const MachineRegisterInfo *MRI; @@ -136,6 +137,7 @@ SIInsertWaits() : MachineFunctionPass(ID), + ST(nullptr), TII(nullptr), TRI(nullptr), ExpInstrTypesSeen(0), @@ -303,8 +305,7 @@ return; } - if (MBB.getParent()->getSubtarget().getGeneration() >= - AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM // or SMEM clause, respectively. // @@ -486,8 +487,7 @@ void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) { - if (MBB.getParent()->getSubtarget().getGeneration() < - AMDGPUSubtarget::VOLCANIC_ISLANDS) + if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) return; // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
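// NOTE (editor): canFoldOffset() in the hunk above encodes the generation
// split for scalar-memory offsets: SI/CI SMRD offsets are an 8-bit dword
// count, while VI SMEM offsets are a 20-bit byte count. A standalone
// restatement of that legality check (the enum and helper name are
// illustrative; the limits mirror the hunk above):
#include <cstdint>

enum class SIGen { SOUTHERN_ISLANDS, SEA_ISLANDS, VOLCANIC_ISLANDS };

static bool scalarLoadOffsetFits(uint64_t OffsetBytes, SIGen Gen) {
  if (Gen >= SIGen::VOLCANIC_ISLANDS)
    return OffsetBytes < (1u << 20);   // 20-bit offset, in bytes, on VI.
  // Pre-VI the offset field counts dwords, so the byte offset must be
  // dword-aligned and the dword count must fit in 8 bits.
  return (OffsetBytes % 4 == 0) && (OffsetBytes / 4) < (1u << 8);
}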
@@ -514,11 +514,9 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) { bool Changes = false; - TII = static_cast(MF.getSubtarget().getInstrInfo()); - TRI = - static_cast(MF.getSubtarget().getRegisterInfo()); - - const AMDGPUSubtarget &ST = MF.getSubtarget(); + ST = &MF.getSubtarget(); + TII = ST->getInstrInfo(); + TRI = &TII->getRegisterInfo(); MRI = &MF.getRegInfo(); WaitedOn = ZeroCounts; @@ -540,7 +538,7 @@ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) { - if (ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS) { + if (ST->getGeneration() <= SISubtarget::SEA_ISLANDS) { // There is a hardware bug on CI/SI where an SMRD instruction may corrupt // the vccz bit, so when we detect that an instruction may read from a // corrupt vccz bit, we need to: Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -25,6 +25,7 @@ class SIInstrInfo final : public AMDGPUInstrInfo { private: const SIRegisterInfo RI; + const SISubtarget &ST; // The inverse predicate should have the negative value. enum BranchPredicate { @@ -91,9 +92,9 @@ unsigned OpIdx1) const override; public: - explicit SIInstrInfo(const AMDGPUSubtarget &st); + explicit SIInstrInfo(const SISubtarget &); - const SIRegisterInfo &getRegisterInfo() const override { + const SIRegisterInfo &getRegisterInfo() const { return RI; } Index: lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.cpp +++ lib/Target/AMDGPU/SIInstrInfo.cpp @@ -28,8 +28,8 @@ using namespace llvm; -SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st) - : AMDGPUInstrInfo(st), RI() {} +SIInstrInfo::SIInstrInfo(const SISubtarget &ST) + : AMDGPUInstrInfo(ST), RI(), ST(ST) {} //===----------------------------------------------------------------------===// // TargetInstrInfo callbacks @@ -730,9 +730,8 @@ unsigned Size) const { MachineFunction *MF = MBB.getParent(); SIMachineFunctionInfo *MFI = MF->getInfo(); - const AMDGPUSubtarget &ST = MF->getSubtarget(); - const SIRegisterInfo *TRI = - static_cast(ST.getRegisterInfo()); + const SISubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); DebugLoc DL = MBB.findDebugLoc(MI); unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF); unsigned WavefrontSize = ST.getWavefrontSize(); @@ -913,8 +912,8 @@ } case AMDGPU::SI_PC_ADD_REL_OFFSET: { - const SIRegisterInfo *TRI = - static_cast(ST.getRegisterInfo()); + const SIRegisterInfo *TRI + = static_cast(ST.getRegisterInfo()); MachineFunction &MF = *MBB.getParent(); unsigned Reg = MI->getOperand(0).getReg(); unsigned RegLo = TRI->getSubReg(Reg, AMDGPU::sub0); @@ -1460,7 +1459,7 @@ // Target-independent instructions do not have an implicit-use of EXEC, even // when they operate on VGPRs. Treating EXEC modifications as scheduling // boundaries prevents incorrect movements of such instructions. - const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + const SIRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (MI->modifiesRegister(AMDGPU::EXEC, TRI)) return true; @@ -2422,8 +2421,8 @@ } else { // This instruction is the _OFFSET variant, so we need to convert it to // ADDR64.
- assert(MBB.getParent()->getSubtarget().getGeneration() - < AMDGPUSubtarget::VOLCANIC_ISLANDS && + assert(MBB.getParent()->getSubtarget().getGeneration() + < SISubtarget::VOLCANIC_ISLANDS && "FIXME: Need to emit flat atomics here"); MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata); @@ -2547,37 +2546,37 @@ } case AMDGPU::S_LSHL_B32: - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { NewOpcode = AMDGPU::V_LSHLREV_B32_e64; swapOperands(Inst); } break; case AMDGPU::S_ASHR_I32: - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { NewOpcode = AMDGPU::V_ASHRREV_I32_e64; swapOperands(Inst); } break; case AMDGPU::S_LSHR_B32: - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { NewOpcode = AMDGPU::V_LSHRREV_B32_e64; swapOperands(Inst); } break; case AMDGPU::S_LSHL_B64: - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { NewOpcode = AMDGPU::V_LSHLREV_B64; swapOperands(Inst); } break; case AMDGPU::S_ASHR_I64: - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { NewOpcode = AMDGPU::V_ASHRREV_I64; swapOperands(Inst); } break; case AMDGPU::S_LSHR_B64: - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { NewOpcode = AMDGPU::V_LSHRREV_B64; swapOperands(Inst); } @@ -3096,7 +3095,7 @@ if (ST.isAmdHsaOS()) { RsrcDataFormat |= (1ULL << 56); - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) // Set MTYPE = 2 RsrcDataFormat |= (2ULL << 59); } @@ -3117,7 +3116,7 @@ // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. // Clear them unless we want a huge stride. 
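// NOTE (editor): the masking below, like the bit-56 / MTYPE code in the hunk
// above it, treats the buffer resource descriptor as a 64-bit word built from
// shifted fields. An illustrative composition of the same fields (the field
// positions are the ones used in the hunks; the helper itself is not part of
// the patch):
#include <cstdint>

static uint64_t rsrcHiBits(bool IsAmdHsa, bool IsVIOrNewer) {
  uint64_t Bits = 0;
  if (IsAmdHsa) {
    Bits |= 1ULL << 56;        // flag set for amdhsa in the hunk above
    if (IsVIOrNewer)
      Bits |= 2ULL << 59;      // MTYPE = 2 on VI, per the comment above
  }
  return Bits;
}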
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; return Rsrc23; Index: lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -7,9 +7,9 @@ // //===----------------------------------------------------------------------===// def isCI : Predicate<"Subtarget->getGeneration() " - ">= AMDGPUSubtarget::SEA_ISLANDS">; + ">= SISubtarget::SEA_ISLANDS">; def isCIOnly : Predicate<"Subtarget->getGeneration() ==" - "AMDGPUSubtarget::SEA_ISLANDS">, + "SISubtarget::SEA_ISLANDS">, AssemblerPredicate <"FeatureSeaIslands">; def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">; @@ -78,9 +78,9 @@ field bits<8> VI = vi; } -// Except for the NONE field, this must be kept in sync with the SISubtarget enum -// in AMDGPUInstrInfo.cpp -def SISubtarget { +// Except for the NONE field, this must be kept in sync with the +// SIEncodingFamily enum in AMDGPUInstrInfo.cpp +def SIEncodingFamily { int NONE = -1; int SI = 0; int VI = 1; @@ -425,7 +425,7 @@ }]>; class SGPRImm : PatLeafgetGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) { + if (Subtarget->getGeneration() < SISubtarget::SOUTHERN_ISLANDS) { return false; } const SIRegisterInfo *SIRI = @@ -681,15 +681,15 @@ multiclass EXP_m { let isPseudo = 1, isCodeGenOnly = 1 in { - def "" : EXPCommon, SIMCInstr <"exp", SISubtarget.NONE> ; + def "" : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.NONE> ; } - def _si : EXPCommon, SIMCInstr <"exp", SISubtarget.SI>, EXPe { + def _si : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.SI>, EXPe { let DecoderNamespace="SICI"; let DisableDecoder = DisableSIDecoder; } - def _vi : EXPCommon, SIMCInstr <"exp", SISubtarget.VI>, EXPe_vi { + def _vi : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.VI>, EXPe_vi { let DecoderNamespace="VI"; let DisableDecoder = DisableVIDecoder; } @@ -701,7 +701,7 @@ class SOP1_Pseudo pattern> : SOP1 , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -709,7 +709,7 @@ class SOP1_Real_si : SOP1 , SOP1e , - SIMCInstr { + SIMCInstr { let isCodeGenOnly = 0; let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; @@ -719,7 +719,7 @@ class SOP1_Real_vi : SOP1 , SOP1e , - SIMCInstr { + SIMCInstr { let isCodeGenOnly = 0; let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; @@ -791,7 +791,7 @@ class SOP2_Pseudo pattern> : SOP2, - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; let Size = 4; @@ -806,7 +806,7 @@ class SOP2_Real_si : SOP2, SOP2e, - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -815,7 +815,7 @@ class SOP2_Real_vi : SOP2, SOP2e, - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -875,7 +875,7 @@ class SOPK_Pseudo pattern> : SOPK , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -883,7 +883,7 @@ class SOPK_Real_si : SOPK , SOPKe , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -893,7 +893,7 @@ class SOPK_Real_vi : SOPK , SOPKe , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -951,7 +951,7 @@ def _si : SOPK , SOPK64e , - SIMCInstr { 
+ SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -960,7 +960,7 @@ def _vi : SOPK , SOPK64e , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -973,7 +973,7 @@ class SMRD_Pseudo pattern> : SMRD , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -982,7 +982,7 @@ string asm> : SMRD , SMRD_IMMe , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -992,7 +992,7 @@ string asm> : SMRD , SMRD_SOFFe , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -1003,7 +1003,7 @@ string asm, list pattern = []> : SMRD , SMEM_IMMe_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1013,7 +1013,7 @@ string asm, list pattern = []> : SMRD , SMEM_SOFFe_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1342,7 +1342,7 @@ class getAsmSDWA { - string dst = !if(HasDst, + string dst = !if(HasDst, !if(!eq(DstVT.Size, 1), "$sdst", // use $sdst for VOPC "$vdst"), @@ -1350,8 +1350,8 @@ string src0 = !if(HasFloatModifiers, "$src0_fmodifiers", "$src0_imodifiers"); string src1 = !if(HasFloatModifiers, "$src1_fmodifiers", "$src1_imodifiers"); string args = !if(!eq(NumSrcArgs, 0), - "", - !if(!eq(NumSrcArgs, 1), + "", + !if(!eq(NumSrcArgs, 1), ", "#src0#"$clamp", ", "#src0#", "#src1#"$clamp" ) @@ -1652,7 +1652,7 @@ class VOP1_Pseudo pattern, string opName> : VOP1Common , VOP , - SIMCInstr , + SIMCInstr , MnemonicAlias { let isPseudo = 1; let isCodeGenOnly = 1; @@ -1663,7 +1663,7 @@ class VOP1_Real_si : VOP1, - SIMCInstr { + SIMCInstr { let AssemblerPredicate = SIAssemblerPredicate; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -1671,7 +1671,7 @@ class VOP1_Real_vi : VOP1, - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1741,7 +1741,7 @@ class VOP2_Pseudo pattern, string opName> : VOP2Common , VOP , - SIMCInstr, + SIMCInstr, MnemonicAlias { let isPseudo = 1; let isCodeGenOnly = 1; @@ -1749,7 +1749,7 @@ class VOP2_Real_si : VOP2 , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -1757,7 +1757,7 @@ class VOP2_Real_vi : VOP2 , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1830,7 +1830,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP , - SIMCInstr, + SIMCInstr, MnemonicAlias { let isPseudo = 1; let isCodeGenOnly = 1; @@ -1843,7 +1843,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3e , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -1853,7 +1853,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3e_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1863,7 +1863,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3ce , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let 
DisableDecoder = DisableSIDecoder; @@ -1873,7 +1873,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3ce_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1883,7 +1883,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3be , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -1893,7 +1893,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3be_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -1903,7 +1903,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3e , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -1913,7 +1913,7 @@ bit HasMods = 0, bit VOP3Only = 0> : VOP3Common , VOP3e_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -2039,11 +2039,11 @@ string asm, list pattern = []> { let isPseudo = 1, isCodeGenOnly = 1 in { def "" : VOPAnyCommon , - SIMCInstr; + SIMCInstr; } def _si : VOP2 , - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -2052,7 +2052,7 @@ def _vi : VOP3Common , VOP3e_vi , VOP3DisableFields <1, 0, 0>, - SIMCInstr { + SIMCInstr { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -2221,7 +2221,7 @@ let isCodeGenOnly = 0 in { def _si : VOP2Common , - SIMCInstr , + SIMCInstr , VOP2_MADKe { let AssemblerPredicates = [isSICI]; let DecoderNamespace = "SICI"; @@ -2230,7 +2230,7 @@ def _vi : VOP2Common , - SIMCInstr , + SIMCInstr , VOP2_MADKe { let AssemblerPredicates = [isVI]; let DecoderNamespace = "VI"; @@ -2242,7 +2242,7 @@ class VOPC_Pseudo pattern, string opName> : VOPCCommon , VOP , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -2260,7 +2260,7 @@ let AssemblerPredicates = [isSICI] in { def _si : VOPC, - SIMCInstr { + SIMCInstr { let Defs = !if(DefExec, [VCC, EXEC], [VCC]); let hasSideEffects = DefExec; let SchedRW = sched; @@ -2272,7 +2272,7 @@ let AssemblerPredicates = [isVI] in { def _vi : VOPC, - SIMCInstr { + SIMCInstr { let Defs = !if(DefExec, [VCC, EXEC], [VCC]); let hasSideEffects = DefExec; let SchedRW = sched; @@ -2459,7 +2459,7 @@ class VINTRP_Pseudo pattern> : VINTRPCommon , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -2468,7 +2468,7 @@ string asm> : VINTRPCommon , VINTRPe , - SIMCInstr { + SIMCInstr { let AssemblerPredicate = SIAssemblerPredicate; let DecoderNamespace = "SICI"; let DisableDecoder = DisableSIDecoder; @@ -2478,7 +2478,7 @@ string asm> : VINTRPCommon , VINTRPe_vi , - SIMCInstr { + SIMCInstr { let AssemblerPredicate = VIAssemblerPredicate; let DecoderNamespace = "VI"; let DisableDecoder = DisableVIDecoder; @@ -2499,7 +2499,7 @@ class DS_Pseudo pattern> : DS , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -2507,7 +2507,7 @@ class DS_Real_si op, string opName, dag outs, dag ins, string asm> : DS , DSe , - SIMCInstr { + SIMCInstr { let isCodeGenOnly = 0; let AssemblerPredicates = [isSICI]; let DecoderNamespace="SICI"; @@ -2517,7 +2517,7 @@ class DS_Real_vi op, string opName, dag outs, dag ins, string asm> : DS , DSe_vi , - SIMCInstr { + SIMCInstr { let isCodeGenOnly 
= 0; let AssemblerPredicates = [isVI]; let DecoderNamespace="VI"; @@ -2730,7 +2730,7 @@ class MTBUF_Pseudo pattern> : MTBUF , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } @@ -2739,7 +2739,7 @@ string asm> : MTBUF , MTBUFe , - SIMCInstr { + SIMCInstr { let DecoderNamespace="SICI"; let DisableDecoder = DisableSIDecoder; } @@ -2747,7 +2747,7 @@ class MTBUF_Real_vi op, string opName, dag outs, dag ins, string asm> : MTBUF , MTBUFe_vi , - SIMCInstr { + SIMCInstr { let DecoderNamespace="VI"; let DisableDecoder = DisableVIDecoder; } @@ -2821,7 +2821,7 @@ class MUBUF_Pseudo pattern> : MUBUF , - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; @@ -2839,7 +2839,7 @@ string asm> : MUBUF , MUBUFe , - SIMCInstr { + SIMCInstr { let lds = 0; let AssemblerPredicate = SIAssemblerPredicate; let DecoderNamespace="SICI"; @@ -2850,7 +2850,7 @@ string asm> : MUBUF , MUBUFe_vi , - SIMCInstr { + SIMCInstr { let lds = 0; let AssemblerPredicate = VIAssemblerPredicate; let DecoderNamespace="VI"; @@ -3174,21 +3174,21 @@ class FLAT_Pseudo pattern> : FLAT <0, outs, ins, "", pattern>, - SIMCInstr { + SIMCInstr { let isPseudo = 1; let isCodeGenOnly = 1; } class FLAT_Real_ci op, string opName, dag outs, dag ins, string asm> : FLAT , - SIMCInstr { + SIMCInstr { let AssemblerPredicate = isCIOnly; let DecoderNamespace="CI"; } class FLAT_Real_vi op, string opName, dag outs, dag ins, string asm> : FLAT , - SIMCInstr { + SIMCInstr { let AssemblerPredicate = VIAssemblerPredicate; let DecoderNamespace="VI"; let DisableDecoder = DisableVIDecoder; @@ -3375,7 +3375,7 @@ class MIMG_Atomic_Real_si : MIMG_Atomic_Helper, - SIMCInstr, + SIMCInstr, MIMGe { let isCodeGenOnly = 0; let AssemblerPredicates = [isSICI]; @@ -3386,7 +3386,7 @@ class MIMG_Atomic_Real_vi : MIMG_Atomic_Helper, - SIMCInstr, + SIMCInstr, MIMGe { let isCodeGenOnly = 0; let AssemblerPredicates = [isVI]; @@ -3398,7 +3398,7 @@ RegisterClass data_rc, RegisterClass addr_rc> { let isPseudo = 1, isCodeGenOnly = 1 in { def "" : MIMG_Atomic_Helper, - SIMCInstr; + SIMCInstr; } let ssamp = 0 in { @@ -3573,8 +3573,9 @@ let FilterClass = "SIMCInstr"; let RowFields = ["PseudoInstr"]; let ColFields = ["Subtarget"]; - let KeyCol = [!cast(SISubtarget.NONE)]; - let ValueCols = [[!cast(SISubtarget.SI)],[!cast(SISubtarget.VI)]]; + let KeyCol = [!cast(SIEncodingFamily.NONE)]; + let ValueCols = [[!cast(SIEncodingFamily.SI)], + [!cast(SIEncodingFamily.VI)]]; } def getAddr64Inst : InstrMapping { Index: lib/Target/AMDGPU/SIInstructions.td =================================================================== --- lib/Target/AMDGPU/SIInstructions.td +++ lib/Target/AMDGPU/SIInstructions.td @@ -19,10 +19,10 @@ def INTERP : InterpSlots; def isGCN : Predicate<"Subtarget->getGeneration() " - ">= AMDGPUSubtarget::SOUTHERN_ISLANDS">, + ">= SISubtarget::SOUTHERN_ISLANDS">, AssemblerPredicate<"FeatureGCN">; def isSI : Predicate<"Subtarget->getGeneration() " - "== AMDGPUSubtarget::SOUTHERN_ISLANDS">, + "== SISubtarget::SOUTHERN_ISLANDS">, AssemblerPredicate<"FeatureSouthernIslands">; Index: lib/Target/AMDGPU/SILoadStoreOptimizer.cpp =================================================================== --- lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -36,6 +36,7 @@ //===----------------------------------------------------------------------===// #include "AMDGPU.h" +#include "AMDGPUSubtarget.h" #include "SIInstrInfo.h" #include "SIRegisterInfo.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" @@ -61,7 +62,6 @@ 
MachineRegisterInfo *MRI; LiveIntervals *LIS; - static bool offsetsCanBeCombined(unsigned Offset0, unsigned Offset1, unsigned EltSize); @@ -411,9 +411,10 @@ if (skipFunction(*MF.getFunction())) return false; - const TargetSubtargetInfo &STM = MF.getSubtarget(); - TRI = static_cast(STM.getRegisterInfo()); - TII = static_cast(STM.getInstrInfo()); + const SISubtarget &STM = MF.getSubtarget(); + TII = STM.getInstrInfo(); + TRI = &TII->getRegisterInfo(); + MRI = &MF.getRegInfo(); LIS = &getAnalysis(); Index: lib/Target/AMDGPU/SILowerControlFlow.cpp =================================================================== --- lib/Target/AMDGPU/SILowerControlFlow.cpp +++ lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -590,9 +590,10 @@ } bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) { - TII = static_cast(MF.getSubtarget().getInstrInfo()); - TRI = - static_cast(MF.getSubtarget().getRegisterInfo()); + const SISubtarget &ST = MF.getSubtarget(); + TII = ST.getInstrInfo(); + TRI = &TII->getRegisterInfo(); + SIMachineFunctionInfo *MFI = MF.getInfo(); bool HaveKill = false; Index: lib/Target/AMDGPU/SILowerI1Copies.cpp =================================================================== --- lib/Target/AMDGPU/SILowerI1Copies.cpp +++ lib/Target/AMDGPU/SILowerI1Copies.cpp @@ -66,9 +66,10 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) { MachineRegisterInfo &MRI = MF.getRegInfo(); - const SIInstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); - const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + const SISubtarget &ST = MF.getSubtarget(); + const SIInstrInfo *TII = ST.getInstrInfo(); + const TargetRegisterInfo *TRI = &TII->getRegisterInfo(); + std::vector I1Defs; for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); Index: lib/Target/AMDGPU/SIMachineFunctionInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIMachineFunctionInfo.cpp +++ lib/Target/AMDGPU/SIMachineFunctionInfo.cpp @@ -75,7 +75,7 @@ WorkItemIDX(false), WorkItemIDY(false), WorkItemIDZ(false) { - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SISubtarget &ST = MF.getSubtarget(); const Function *F = MF.getFunction(); PSInputAddr = AMDGPU::getInitialPSInputAddr(*F); @@ -125,7 +125,7 @@ // We don't need to worry about accessing spills with flat instructions. // TODO: On VI where we must use flat for global, we should be able to omit // this if it is never used for generic access. 
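// NOTE (editor): the condition below bundles three requirements before
// requesting the flat-scratch-init setup. A named-out restatement, purely
// illustrative (the predicate is invented for clarity, not part of the
// patch):
static bool needsFlatScratchInit(bool HasStackObjects, bool HasFlatISA,
                                 bool IsAmdHsaOS) {
  // Flat scratch setup is only worth requesting when the function actually
  // has a stack, the ISA has FLAT instructions (CI and newer), and the
  // amdhsa ABI is in use.
  return HasStackObjects && HasFlatISA && IsAmdHsaOS;
}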
- if (HasStackObjects && ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS && + if (HasStackObjects && ST.getGeneration() >= SISubtarget::SEA_ISLANDS && ST.isAmdHsaOS()) FlatScratchInit = true; @@ -174,13 +174,14 @@ return FlatScratchInitUserSGPR; } -SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg( +SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg ( MachineFunction *MF, unsigned FrameIndex, unsigned SubIdx) { + const SISubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + MachineFrameInfo *FrameInfo = MF->getFrameInfo(); - const SIRegisterInfo *TRI = static_cast( - MF->getSubtarget().getRegisterInfo()); MachineRegisterInfo &MRI = MF->getRegInfo(); int64_t Offset = FrameInfo->getObjectOffset(FrameIndex); Offset += SubIdx * 4; Index: lib/Target/AMDGPU/SIMachineScheduler.cpp =================================================================== --- lib/Target/AMDGPU/SIMachineScheduler.cpp +++ lib/Target/AMDGPU/SIMachineScheduler.cpp @@ -12,8 +12,8 @@ // //===----------------------------------------------------------------------===// +#include "AMDGPU.h" #include "SIMachineScheduler.h" -#include "AMDGPUSubtarget.h" #include "llvm/CodeGen/LiveInterval.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/CodeGen/MachineRegisterInfo.h" Index: lib/Target/AMDGPU/SIRegisterInfo.h =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.h +++ lib/Target/AMDGPU/SIRegisterInfo.h @@ -12,17 +12,17 @@ // //===----------------------------------------------------------------------===// - #ifndef LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H #define LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H #include "AMDGPURegisterInfo.h" -#include "AMDGPUSubtarget.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Support/Debug.h" namespace llvm { +class SISubtarget; +class MachineRegisterInfo; + struct SIRegisterInfo final : public AMDGPURegisterInfo { private: unsigned SGPR32SetID; @@ -80,7 +80,9 @@ unsigned FIOperandNum, RegScavenger *RS) const override; - unsigned getHWRegIndex(unsigned Reg) const override; + unsigned getHWRegIndex(unsigned Reg) const { + return getEncodingValue(Reg) & 0xff; + } /// \brief Return the 'base' register class for this register. /// e.g. SGPR0 => SReg_32, VGPR => VGPR_32 SGPR0_SGPR1 -> SReg_32, etc. @@ -179,8 +181,7 @@ /// \brief Give the maximum number of SGPRs that can be used by \p WaveCount /// concurrent waves. 
- unsigned getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen, - unsigned WaveCount) const; + unsigned getNumSGPRsAllowed(const SISubtarget &ST, unsigned WaveCount) const; unsigned findUnusedRegister(const MachineRegisterInfo &MRI, const TargetRegisterClass *RC) const; Index: lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.cpp +++ lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -15,6 +15,7 @@ #include "SIRegisterInfo.h" #include "SIInstrInfo.h" #include "SIMachineFunctionInfo.h" +#include "AMDGPUSubtarget.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/RegisterScavenging.h" @@ -24,8 +25,8 @@ using namespace llvm; static unsigned getMaxWaveCountPerSIMD(const MachineFunction &MF) { - const SIMachineFunctionInfo& MFI = *MF.getInfo(); - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SIMachineFunctionInfo &MFI = *MF.getInfo(); + const SISubtarget &ST = MF.getSubtarget(); unsigned SIMDPerCU = 4; unsigned MaxInvocationsPerWave = SIMDPerCU * ST.getWavefrontSize(); @@ -34,13 +35,13 @@ } static unsigned getMaxWorkGroupSGPRCount(const MachineFunction &MF) { - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SISubtarget &ST = MF.getSubtarget(); unsigned MaxWaveCountPerSIMD = getMaxWaveCountPerSIMD(MF); unsigned TotalSGPRCountPerSIMD, AddressableSGPRCount, SGPRUsageAlignment; unsigned ReservedSGPRCount; - if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { TotalSGPRCountPerSIMD = 800; AddressableSGPRCount = 102; SGPRUsageAlignment = 16; @@ -56,7 +57,7 @@ MaxSGPRCount = alignDown(MaxSGPRCount, SGPRUsageAlignment); if (ST.hasSGPRInitBug()) - MaxSGPRCount = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG; + MaxSGPRCount = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG; return std::min(MaxSGPRCount - ReservedSGPRCount, AddressableSGPRCount); } @@ -195,7 +196,7 @@ // Reserve registers for debugger usage if "amdgpu-debugger-reserve-trap-regs" // attribute was specified. - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SISubtarget &ST = MF.getSubtarget(); if (ST.debuggerReserveRegs()) { unsigned ReservedVGPRFirst = MaxWorkGroupVGPRCount - MFI->getDebuggerReservedVGPRCount(); @@ -210,10 +211,9 @@ unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF, unsigned Idx) const { - const AMDGPUSubtarget &STI = MF.getSubtarget(); + const SISubtarget &STI = MF.getSubtarget(); // FIXME: We should adjust the max number of waves based on LDS size. 
- unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(), - STI.getMaxWavesPerCU()); + unsigned SGPRLimit = getNumSGPRsAllowed(STI, STI.getMaxWavesPerCU()); unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU()); unsigned VSLimit = SGPRLimit + VGPRLimit; @@ -274,8 +274,8 @@ DL = Ins->getDebugLoc(); MachineFunction *MF = MBB->getParent(); - const AMDGPUSubtarget &Subtarget = MF->getSubtarget(); - const TargetInstrInfo *TII = Subtarget.getInstrInfo(); + const SISubtarget &Subtarget = MF->getSubtarget(); + const SIInstrInfo *TII = Subtarget.getInstrInfo(); if (Offset == 0) { BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg) @@ -297,9 +297,8 @@ MachineBasicBlock *MBB = MI.getParent(); MachineFunction *MF = MBB->getParent(); - const AMDGPUSubtarget &Subtarget = MF->getSubtarget(); - const SIInstrInfo *TII - = static_cast(Subtarget.getInstrInfo()); + const SISubtarget &Subtarget = MF->getSubtarget(); + const SIInstrInfo *TII = Subtarget.getInstrInfo(); #ifndef NDEBUG // FIXME: Is it possible to be storing a frame index to itself? @@ -409,10 +408,11 @@ bool IsKill = SrcDst->isKill(); MachineBasicBlock *MBB = MI->getParent(); MachineFunction *MF = MI->getParent()->getParent(); - const SIInstrInfo *TII = - static_cast(MF->getSubtarget().getInstrInfo()); + const SISubtarget &ST = MF->getSubtarget(); + const SIInstrInfo *TII = ST.getInstrInfo(); + DebugLoc DL = MI->getDebugLoc(); - bool IsStore = TII->get(LoadStoreOp).mayStore(); + bool IsStore = MI->mayStore(); bool RanOutOfSGPRs = false; bool Scavenged = false; @@ -489,8 +489,8 @@ MachineBasicBlock *MBB = MI->getParent(); SIMachineFunctionInfo *MFI = MF->getInfo(); MachineFrameInfo *FrameInfo = MF->getFrameInfo(); - const SIInstrInfo *TII = - static_cast(MF->getSubtarget().getInstrInfo()); + const SISubtarget &ST = MF->getSubtarget(); + const SIInstrInfo *TII = ST.getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); MachineOperand &FIOp = MI->getOperand(FIOperandNum); @@ -662,10 +662,6 @@ } } -unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const { - return getEncodingValue(Reg) & 0xff; -} - // FIXME: This is very slow. It might be worth creating a map from physreg to // register class. 
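// NOTE (editor): one way to act on the FIXME above is a table built once and
// consulted per query. A sketch with simplified stand-in types (this is not
// LLVM's implementation, just the shape the FIXME suggests):
#include <unordered_map>

struct RegClassDesc;   // stand-in for the target's register class record

using PhysRegClassMap = std::unordered_map<unsigned, const RegClassDesc *>;

static const RegClassDesc *lookupPhysRegClass(const PhysRegClassMap &Map,
                                              unsigned Reg) {
  auto It = Map.find(Reg);                 // O(1) expected, vs. a linear scan
  return It == Map.end() ? nullptr : It->second;
}
// The map would be populated once, by walking every register class and
// recording the smallest class that contains each physical register.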
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const { @@ -900,7 +896,7 @@ enum PreloadedValue Value) const { const SIMachineFunctionInfo *MFI = MF.getInfo(); - const AMDGPUSubtarget &ST = MF.getSubtarget(); + const SISubtarget &ST = MF.getSubtarget(); (void)ST; switch (Value) { case SIRegisterInfo::WORKGROUP_ID_X: @@ -971,9 +967,9 @@ } } -unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen, +unsigned SIRegisterInfo::getNumSGPRsAllowed(const SISubtarget &ST, unsigned WaveCount) const { - if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { + if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { switch (WaveCount) { case 10: return 80; case 9: return 80; Index: lib/Target/AMDGPU/SIShrinkInstructions.cpp =================================================================== --- lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -199,9 +199,10 @@ return false; MachineRegisterInfo &MRI = MF.getRegInfo(); - const SIInstrInfo *TII = - static_cast(MF.getSubtarget().getInstrInfo()); + const SISubtarget &ST = MF.getSubtarget(); + const SIInstrInfo *TII = ST.getInstrInfo(); const SIRegisterInfo &TRI = TII->getRegisterInfo(); + std::vector I1Defs; for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); Index: lib/Target/AMDGPU/SIWholeQuadMode.cpp =================================================================== --- lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -476,8 +476,10 @@ ExecExports.clear(); LiveMaskQueries.clear(); - TII = static_cast(MF.getSubtarget().getInstrInfo()); - TRI = static_cast(MF.getSubtarget().getRegisterInfo()); + const SISubtarget &ST = MF.getSubtarget(); + + TII = ST.getInstrInfo(); + TRI = &TII->getRegisterInfo(); MRI = &MF.getRegInfo(); char GlobalFlags = analyzeFunction(MF);
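// NOTE (editor): the pattern this whole patch repeats is replacing
// static_cast chains on generic accessors with subtarget classes whose
// accessors already return the derived types (covariant return types). A
// minimal, self-contained illustration with generic names, not the LLVM
// classes:
struct BaseRegInfo {
  virtual ~BaseRegInfo() = default;
};

struct SIRegInfoLike : BaseRegInfo {
  unsigned hwRegIndex(unsigned Reg) const { return Reg & 0xff; }
};

struct BaseSubtargetLike {
  virtual ~BaseSubtargetLike() = default;
  virtual const BaseRegInfo *getRegisterInfo() const = 0;
};

struct SISubtargetLike : BaseSubtargetLike {
  SIRegInfoLike RI;
  // Covariant override: callers holding the derived subtarget get the
  // derived register info directly, so the static_cast at every use site
  // disappears.
  const SIRegInfoLike *getRegisterInfo() const override { return &RI; }
};
// Usage: given SISubtargetLike &ST, ST.getRegisterInfo()->hwRegIndex(Reg)
// compiles with no cast, which is exactly what the rewritten passes rely on.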