Index: llvm/lib/Target/M68k/CMakeLists.txt =================================================================== --- llvm/lib/Target/M68k/CMakeLists.txt +++ llvm/lib/Target/M68k/CMakeLists.txt @@ -16,7 +16,19 @@ # M68kCodeGen should match with LLVMBuild.txt M68kCodeGen add_llvm_target(M68kCodeGen + M68kAsmPrinter.cpp + M68kCollapseMOVEMPass.cpp + M68kExpandPseudo.cpp + M68kFrameLowering.cpp + M68kInstrInfo.cpp + M68kISelLowering.cpp + M68kISelDAGToDAG.cpp + M68kMachineFunction.cpp + M68kMCInstLower.cpp + M68kRegisterInfo.cpp + M68kSubtarget.cpp M68kTargetMachine.cpp + M68kTargetObjectFile.cpp LINK_COMPONENTS Analysis Index: llvm/lib/Target/M68k/M68k.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68k.h @@ -0,0 +1,52 @@ +//===- M68k.h - Top-level interface for M68k representation -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the entry points for global functions defined in the +/// M68k target library, as used by the LLVM JIT. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68k_M68k_H +#define LLVM_LIB_TARGET_M68k_M68k_H + +#include "llvm/Support/CodeGen.h" + +namespace llvm { + +class FunctionPass; +class M68kTargetMachine; + +/// This pass converts a legalized DAG into a M68k-specific DAG, ready for +/// instruction scheduling. +FunctionPass *createM68kISelDag(M68kTargetMachine &TM); + +/// Return a Machine IR pass that expands M68k-specific pseudo +/// instructions into a sequence of actual instructions. This pass +/// must run after prologue/epilogue insertion and before lowering +/// the MachineInstr to MC. 
+FunctionPass *createM68kExpandPseudoPass(); + +/// This pass initializes a global base register for PIC on M68k. +FunctionPass *createM68kGlobalBaseRegPass(); + +/// Finds sequential MOVEM instruction and collapse them into a single one. This +/// pass has to be run after all pseudo expansions and prologue/epilogue +/// emission so that all possible MOVEM are already in place. +FunctionPass *createM68kCollapseMOVEMPass(); + +/// Finds MOVE instructions before any conditional branch instruction and +/// replaces them with MOVEM instruction. Motorola's MOVEs do trash(V,C) flags +/// register which prevents branch from taking the correct route. This pass +/// has to be run after all pseudo expansions and prologue/epilogue emission +/// so that all possible MOVEs are present. +FunctionPass *createM68kConvertMOVToMOVMPass(); + +} // namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kAsmPrinter.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kAsmPrinter.h @@ -0,0 +1,62 @@ +//===----- M68kAsmPrinter.h - M68k LLVM Assembly Printer -------- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains M68k assembler printer declarations. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KASMPRINTER_H +#define LLVM_LIB_TARGET_M68K_M68KASMPRINTER_H + +#include "M68kMCInstLower.h" +#include "M68kTargetMachine.h" + +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { +class MCStreamer; +class MachineInstr; +class MachineBasicBlock; +class Module; +class raw_ostream; + +class M68kSubtarget; +class M68kMachineFunctionInfo; + +class LLVM_LIBRARY_VISIBILITY M68kAsmPrinter : public AsmPrinter { + + void EmitInstrWithMacroNoAT(const MachineInstr *MI); + +public: + const M68kSubtarget *Subtarget; + const M68kMachineFunctionInfo *MMFI; + std::unique_ptr MCInstLowering; + + explicit M68kAsmPrinter(TargetMachine &TM, + std::unique_ptr Streamer) + : AsmPrinter(TM, std::move(Streamer)) { + Subtarget = static_cast(TM).getSubtargetImpl(); + } + + StringRef getPassName() const override { return "M68k Assembly Printer"; } + + virtual bool runOnMachineFunction(MachineFunction &MF) override; + + void emitInstruction(const MachineInstr *MI) override; + void emitFunctionBodyStart() override; + void emitFunctionBodyEnd() override; + void emitStartOfAsmFile(Module &M) override; + void emitEndOfAsmFile(Module &M) override; +}; +} // namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kAsmPrinter.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kAsmPrinter.cpp @@ -0,0 +1,99 @@ +//===----- M68kAsmPrinter.cpp - M68k LLVM Assembly Printer -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains a printer that converts from our internal representation +/// of machine-dependent LLVM code to GAS-format M68k assembly language. +/// +//===----------------------------------------------------------------------===// + +// TODO #33 make it print Motorola asm + +#include "M68kAsmPrinter.h" + +#include "M68k.h" +#include "M68kInstrInfo.h" +#include "M68kMachineFunction.h" + +#include "MCTargetDesc/M68kBaseInfo.h" +#include "MCTargetDesc/M68kInstPrinter.h" + +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/Twine.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstBuilder.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetLoweringObjectFile.h" +#include "llvm/Target/TargetOptions.h" +#include + +using namespace llvm; + +#define DEBUG_TYPE "m68k-asm-printer" + +bool M68kAsmPrinter::runOnMachineFunction(MachineFunction &MF) { + MMFI = MF.getInfo(); + MCInstLowering = std::make_unique(MF, *this); + AsmPrinter::runOnMachineFunction(MF); + return true; +} + +void M68kAsmPrinter::emitInstruction(const MachineInstr *MI) { + switch (MI->getOpcode()) { + default: { + if (MI->isPseudo()) { + LLVM_DEBUG(dbgs() << "Pseudo opcode(" << MI->getOpcode() + << ") found in EmitInstruction()\n"); + llvm_unreachable("Cannot 
proceed"); + } + break; + } + case M68k::TAILJMPj: + case M68k::TAILJMPq: + // Lower these as normal, but add some comments. + OutStreamer->AddComment("TAILCALL"); + break; + } + + MCInst TmpInst0; + MCInstLowering->Lower(MI, TmpInst0); + OutStreamer->emitInstruction(TmpInst0, getSubtargetInfo()); +} + +void M68kAsmPrinter::emitFunctionBodyStart() { + // TODO #33 +} + +void M68kAsmPrinter::emitFunctionBodyEnd() { + // TODO #33 +} + +void M68kAsmPrinter::emitStartOfAsmFile(Module &M) { + OutStreamer->emitSyntaxDirective(); +} + +void M68kAsmPrinter::emitEndOfAsmFile(Module &M) {} + +extern "C" void LLVMInitializeM68kAsmPrinter() { + RegisterAsmPrinter X(TheM68kTarget); +} Index: llvm/lib/Target/M68k/M68kCallingConv.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kCallingConv.h @@ -0,0 +1,78 @@ +//===-- M68kCallingConv.h - M68k Custom CC Routines ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the custom routines for the M68k Calling Convention +/// that aren't done by tablegen. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KCALLINGCONV_H +#define LLVM_LIB_TARGET_M68K_M68KCALLINGCONV_H + +#include "MCTargetDesc/M68kMCTargetDesc.h" + +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/IR/CallingConv.h" +#include "llvm/IR/Function.h" + +namespace llvm { + +/// Custom state to propagate llvm type info to register CC assigner +class M68kCCState : public CCState { +public: + const llvm::Function &F; + + M68kCCState(const llvm::Function &F, CallingConv::ID CC, bool isVarArg, + MachineFunction &MF, SmallVectorImpl &locs, + LLVMContext &C) + : CCState(CC, isVarArg, MF, locs, C), F(F) {} +}; + +/// NOTE this function is used to select registers for formal arguments and call +/// TODO #34 Need to assign all the pointers first +inline bool CC_M68k_Any_AssignToReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT, + CCValAssign::LocInfo &LocInfo, + ISD::ArgFlagsTy &ArgFlags, CCState &State) { + M68kCCState CCInfo = static_cast(State); + + static const MCPhysReg DataRegList[] = {M68k::D0, M68k::D1, M68k::A0, + M68k::A1}; + + // Address registers have %a register priority + static const MCPhysReg AddrRegList[] = { + M68k::A0, + M68k::A1, + M68k::D0, + M68k::D1, + }; + + // FIXME: This is probably wrong + auto I = CCInfo.F.arg_begin(); + int No = ValNo; + while (No > 0) { + No -= I->getType()->isIntegerTy(64) ? 2 : 1; + I++; + } + + bool isPtr = I != CCInfo.F.arg_end() && I->getType()->isPointerTy(); + + unsigned Reg = + isPtr ? 
State.AllocateReg(AddrRegList) : State.AllocateReg(DataRegList); + + if (Reg) { + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + return true; + } + + return false; +} + +} // namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp @@ -0,0 +1,297 @@ +//===----- M68kCollapseMOVEMPass.cpp - Expand MOVEM pass --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains a pass that collapses sequential MOVEM instructions into +/// a single one. +/// +//===----------------------------------------------------------------------===// + +#include "M68k.h" +#include "M68kFrameLowering.h" +#include "M68kInstrInfo.h" +#include "M68kMachineFunction.h" +#include "M68kSubtarget.h" + +#include "llvm/Analysis/EHPersonalities.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/Support/MathExtras.h" + +using namespace llvm; + +#define DEBUG_TYPE "M68k-collapse-movem" + +namespace { + +enum UpdateType { Ascending, Descending, Intermixed }; + +struct MOVEMState { +private: + MachineBasicBlock::iterator Begin; + MachineBasicBlock::iterator End; + + unsigned Base; + + int Start; + int Stop; + + unsigned Mask; + + enum { None, Load, Store } Type; + +public: + MOVEMState() + : Begin(nullptr), End(nullptr), Base(0), Start(INT_MIN), Stop(INT_MAX), + Mask(0), Type(None) {} + + void setBegin(MachineBasicBlock::iterator &MI) { + assert(Begin == nullptr); + Begin = MI; + } + + void 
setEnd(MachineBasicBlock::iterator &MI) { + assert(End == nullptr); + End = MI; + } + + bool hasBase() { return Base != 0; } + + unsigned getBase() { + assert(Base); + return Base; + } + + MachineBasicBlock::iterator begin() { + assert(Begin != nullptr); + return Begin; + } + + MachineBasicBlock::iterator end() { + assert(End != nullptr); + return End; + } + + unsigned getMask() { return Mask; } + + void setBase(int Value) { + assert(!hasBase()); + Base = Value; + } + + // You need to call this before Mask update + UpdateType classifyUpdateByMask(unsigned Value) { + assert(Value); + + if (Mask == 0) { + return Ascending; + } else if (Mask < Value) { + return Ascending; + } else if (Mask > Value) { + return Descending; + } + + return Intermixed; + } + + bool update(int O, int M) { + UpdateType Type = classifyUpdateByMask(M); + if (Type == Intermixed) + return false; + if (Start == INT_MIN) { + Start = Stop = O; + updateMask(M); + return true; + } else if (Type == Descending && O == Start - 4) { + Start -= 4; + updateMask(M); + return true; + } else if (Type == Ascending && O == Stop + 4) { + Stop += 4; + updateMask(M); + return true; + } + + return false; + } + + int getFinalOffset() { + // Since MOVEM in control mode increment the address on each iteration + assert(Start != INT_MIN); + return Start; + } + + bool updateMask(unsigned Value) { + assert(isUInt<16>(Value) && "Mask must fit 16 bit"); + assert(!(Value & Mask) && + "This is weird, there should be no intersections"); + Mask |= Value; + return true; + } + + void setLoad() { Type = Load; } + void setStore() { Type = Store; } + + bool isLoad() { return Type == Load; } + bool isStore() { return Type == Store; } +}; + +class M68kCollapseMOVEM : public MachineFunctionPass { +public: + static char ID; + + const M68kSubtarget *STI; + const M68kInstrInfo *TII; + const M68kRegisterInfo *TRI; + const M68kMachineFunctionInfo *MFI; + const M68kFrameLowering *FL; + + M68kCollapseMOVEM() : MachineFunctionPass(ID) {} + + 
void Finish(MachineBasicBlock &MBB, MOVEMState &State) { + auto MI = State.begin(); + auto End = State.end(); + auto DL = MI->getDebugLoc(); + + // No need to delete then add a single instruction + if (std::next(MI) == End) { + State = MOVEMState(); + return; + } + + // Delete all the MOVEM instruction till the end + while (MI != End) { + auto Next = std::next(MI); + MBB.erase(MI); + MI = Next; + } + + // Add a unified one + if (State.isLoad()) { + BuildMI(MBB, End, DL, TII->get(M68k::MOVM32mp)) + .addImm(State.getMask()) + .addImm(State.getFinalOffset()) + .addReg(State.getBase()); + } else { + BuildMI(MBB, End, DL, TII->get(M68k::MOVM32pm)) + .addImm(State.getFinalOffset()) + .addReg(State.getBase()) + .addImm(State.getMask()); + } + + State = MOVEMState(); + } + + bool ProcessMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + MOVEMState &State, unsigned Mask, int Offset, unsigned Reg, + bool isStore = false) { + if (State.hasBase()) { + // If current Type, Reg, Offset and Mask is in proper order then + // merge in the state + MOVEMState Temp = State; + if (State.isStore() == isStore && State.getBase() == Reg && + State.update(Offset, Mask)) { + return true; + // Otherwise we Finish processing of the current MOVEM sequence and + // start a new one + } else { + State = Temp; + State.setEnd(MI); + Finish(MBB, State); + return ProcessMI(MBB, MI, State, Mask, Offset, Reg, isStore); + } + // If this is the first instruction in sequence then initialize the State + } else if (Reg == TRI->getStackRegister() || + Reg == TRI->getBaseRegister() || + Reg == TRI->getFrameRegister(*MBB.getParent())) { + State.setBegin(MI); + State.setBase(Reg); + State.update(Offset, Mask); + isStore ? 
State.setStore() : State.setLoad(); + return true; + } + return false; + } + + bool runOnMachineFunction(MachineFunction &MF) override { + STI = &static_cast(MF.getSubtarget()); + TII = STI->getInstrInfo(); + TRI = STI->getRegisterInfo(); + MFI = MF.getInfo(); + FL = STI->getFrameLowering(); + + bool Modified = false; + + MOVEMState State; + + unsigned Mask = 0; + unsigned Reg = 0; + int Offset = 0; + + for (auto &MBB : MF) { + auto MI = MBB.begin(), E = MBB.end(); + while (MI != E) { + // Processing might change current instruction, save next first + auto NMI = std::next(MI); + switch (MI->getOpcode()) { + default: + if (State.hasBase()) { + State.setEnd(MI); + Finish(MBB, State); + Modified = true; + } + break; + case M68k::MOVM32jm: + Mask = MI->getOperand(1).getImm(); + Reg = MI->getOperand(0).getReg(); + Offset = 0; + Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, true); + break; + case M68k::MOVM32pm: + Mask = MI->getOperand(2).getImm(); + Reg = MI->getOperand(1).getReg(); + Offset = MI->getOperand(0).getImm(); + Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, true); + break; + case M68k::MOVM32mj: + Mask = MI->getOperand(0).getImm(); + Reg = MI->getOperand(1).getReg(); + Offset = 0; + Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, false); + break; + case M68k::MOVM32mp: + Mask = MI->getOperand(0).getImm(); + Reg = MI->getOperand(2).getReg(); + Offset = MI->getOperand(1).getImm(); + Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, false); + break; + } + MI = NMI; + } + + if (State.hasBase()) { + State.setEnd(MI); + Finish(MBB, State); + } + } + + return Modified; + } + + StringRef getPassName() const override { return "M68k MOVEM collapser pass"; } +}; + +char M68kCollapseMOVEM::ID = 0; +} // anonymous namespace. + +/// Returns an instance of the pseudo instruction expansion pass. 
+FunctionPass *llvm::createM68kCollapseMOVEMPass() { + return new M68kCollapseMOVEM(); +} Index: llvm/lib/Target/M68k/M68kExpandPseudo.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kExpandPseudo.cpp @@ -0,0 +1,328 @@ +//===--M68kExpandPseudo.cpp - Expand pseudo instructions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains a pass that expands pseudo instructions into target +/// instructions to allow proper scheduling, if-conversion, other late +/// optimizations, or simply the encoding of the instructions. +/// +//===----------------------------------------------------------------------===// + +#include "M68k.h" +#include "M68kFrameLowering.h" +#include "M68kInstrInfo.h" +#include "M68kMachineFunction.h" +#include "M68kSubtarget.h" + +#include "llvm/Analysis/EHPersonalities.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved. 
+#include "llvm/IR/GlobalValue.h" + +using namespace llvm; + +#define DEBUG_TYPE "M68k-expand-pseudos" + +namespace { +class M68kExpandPseudo : public MachineFunctionPass { +public: + static char ID; + M68kExpandPseudo() : MachineFunctionPass(ID) {} + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + AU.addPreservedID(MachineLoopInfoID); + AU.addPreservedID(MachineDominatorsID); + MachineFunctionPass::getAnalysisUsage(AU); + } + + const M68kSubtarget *STI; + const M68kInstrInfo *TII; + const M68kRegisterInfo *TRI; + const M68kMachineFunctionInfo *MFI; + const M68kFrameLowering *FL; + + bool runOnMachineFunction(MachineFunction &Fn) override; + + MachineFunctionProperties getRequiredProperties() const override { + return MachineFunctionProperties().set( + MachineFunctionProperties::Property::NoVRegs); + } + + StringRef getPassName() const override { + return "M68k pseudo instruction expansion pass"; + } + +private: + bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI); + bool ExpandMBB(MachineBasicBlock &MBB); +}; +char M68kExpandPseudo::ID = 0; +} // End anonymous namespace. + +/// If \p MBBI is a pseudo instruction, this method expands +/// it to the corresponding (sequence of) actual instruction(s). +/// \returns true if \p MBBI has been expanded. 
+bool M68kExpandPseudo::ExpandMI(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + MachineInstr &MI = *MBBI; + MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); + unsigned Opcode = MI.getOpcode(); + DebugLoc DL = MBBI->getDebugLoc(); + switch (Opcode) { + default: + return false; + /// TODO #37 would be nice to infer all these parameters + + case M68k::MOVXd16d8: + return TII->ExpandMOVX_RR(MIB, MVT::i16, MVT::i8); + case M68k::MOVXd32d8: + return TII->ExpandMOVX_RR(MIB, MVT::i32, MVT::i8); + case M68k::MOVXd32d16: + return TII->ExpandMOVX_RR(MIB, MVT::i32, MVT::i16); + + case M68k::MOVSXd16d8: + return TII->ExpandMOVSZX_RR(MIB, true, MVT::i16, MVT::i8); + case M68k::MOVSXd32d8: + return TII->ExpandMOVSZX_RR(MIB, true, MVT::i32, MVT::i8); + case M68k::MOVSXd32d16: + return TII->ExpandMOVSZX_RR(MIB, true, MVT::i32, MVT::i16); + + case M68k::MOVZXd16d8: + return TII->ExpandMOVSZX_RR(MIB, false, MVT::i16, MVT::i8); + case M68k::MOVZXd32d8: + return TII->ExpandMOVSZX_RR(MIB, false, MVT::i32, MVT::i8); + case M68k::MOVZXd32d16: + return TII->ExpandMOVSZX_RR(MIB, false, MVT::i32, MVT::i16); + + case M68k::MOVSXd16j8: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dj), MVT::i16, + MVT::i8); + case M68k::MOVSXd32j8: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dj), MVT::i32, + MVT::i8); + case M68k::MOVSXd32j16: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV16rj), MVT::i32, + MVT::i16); + + case M68k::MOVZXd16j8: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dj), MVT::i16, + MVT::i8); + case M68k::MOVZXd32j8: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dj), MVT::i32, + MVT::i8); + case M68k::MOVZXd32j16: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV16rj), MVT::i32, + MVT::i16); + + case M68k::MOVSXd16p8: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dp), MVT::i16, + MVT::i8); + case M68k::MOVSXd32p8: + return TII->ExpandMOVSZX_RM(MIB, true, 
TII->get(M68k::MOV8dp), MVT::i32, + MVT::i8); + case M68k::MOVSXd32p16: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV16rp), MVT::i32, + MVT::i16); + + case M68k::MOVZXd16p8: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dp), MVT::i16, + MVT::i8); + case M68k::MOVZXd32p8: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dp), MVT::i32, + MVT::i8); + case M68k::MOVZXd32p16: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV16rp), MVT::i32, + MVT::i16); + + case M68k::MOVSXd16f8: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8df), MVT::i16, + MVT::i8); + case M68k::MOVSXd32f8: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8df), MVT::i32, + MVT::i8); + case M68k::MOVSXd32f16: + return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV16rf), MVT::i32, + MVT::i16); + + case M68k::MOVZXd16f8: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8df), MVT::i16, + MVT::i8); + case M68k::MOVZXd32f8: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8df), MVT::i32, + MVT::i8); + case M68k::MOVZXd32f16: + return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV16rf), MVT::i32, + MVT::i16); + + case M68k::MOV8cd: + return TII->ExpandCCR(MIB, /* isToCCR */ true); + case M68k::MOV8dc: + return TII->ExpandCCR(MIB, /* isToCCR */ false); + + case M68k::MOVM8jm_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32jm), /* isRM */ false); + case M68k::MOVM16jm_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32jm), /* isRM */ false); + case M68k::MOVM32jm_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32jm), /* isRM */ false); + + case M68k::MOVM8pm_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32pm), /* isRM */ false); + case M68k::MOVM16pm_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32pm), /* isRM */ false); + case M68k::MOVM32pm_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32pm), /* isRM */ false); + + case M68k::MOVM8mj_P: + return TII->ExpandMOVEM(MIB, 
TII->get(M68k::MOVM32mj), /* isRM */ true); + case M68k::MOVM16mj_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mj), /* isRM */ true); + case M68k::MOVM32mj_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mj), /* isRM */ true); + + case M68k::MOVM8mp_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mp), /* isRM */ true); + case M68k::MOVM16mp_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mp), /* isRM */ true); + case M68k::MOVM32mp_P: + return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mp), /* isRM */ true); + + case M68k::TCRETURNq: + case M68k::TCRETURNj: { + MachineOperand &JumpTarget = MI.getOperand(0); + MachineOperand &StackAdjust = MI.getOperand(1); + assert(StackAdjust.isImm() && "Expecting immediate value."); + + // Adjust stack pointer. + int StackAdj = StackAdjust.getImm(); + int MaxTCDelta = MFI->getTCReturnAddrDelta(); + int Offset = 0; + assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive"); + + // Incorporate the retaddr area. + Offset = StackAdj - MaxTCDelta; + assert(Offset >= 0 && "Offset should never be negative"); + + if (Offset) { + // Check for possible merge with preceding ADD instruction. + Offset += FL->mergeSPUpdates(MBB, MBBI, true); + FL->emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true); + } + + // Jump to label or value in register. + if (Opcode == M68k::TCRETURNq) { + MachineInstrBuilder MIB = + BuildMI(MBB, MBBI, DL, TII->get(M68k::TAILJMPq)); + if (JumpTarget.isGlobal()) { + MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(), + JumpTarget.getTargetFlags()); + } else { + assert(JumpTarget.isSymbol()); + MIB.addExternalSymbol(JumpTarget.getSymbolName(), + JumpTarget.getTargetFlags()); + } + } else { + BuildMI(MBB, MBBI, DL, TII->get(M68k::TAILJMPj)) + .addReg(JumpTarget.getReg(), RegState::Kill); + } + + MachineInstr &NewMI = *std::prev(MBBI); + NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI); + + // Delete the pseudo instruction TCRETURN. 
+ MBB.erase(MBBI); + + return true; + } + case M68k::RET: { + // Adjust stack to erase error code + int64_t StackAdj = MBBI->getOperand(0).getImm(); + MachineInstrBuilder MIB; + + if (StackAdj == 0) { + MIB = BuildMI(MBB, MBBI, DL, TII->get(M68k::RTS)); + } else if (isUInt<16>(StackAdj)) { + + if (STI->atLeastM68020()) { + llvm_unreachable("RTD is not implemented"); + // MIB = BuildMI(MBB, MBBI, DL, TII->get(M68k::RTD)).addImm(StackAdj); + } else { + // Copy PC from stack to a free address(A0 or A1) register + // TODO #38 check if it is really free + BuildMI(MBB, MBBI, DL, TII->get(M68k::MOV32aj), M68k::A1) + .addReg(M68k::SP); + + // Adjust SP + FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true); + + // Put the return address on stack + BuildMI(MBB, MBBI, DL, TII->get(M68k::MOV32ja)) + .addReg(M68k::SP) + .addReg(M68k::A1); + + // RTS + BuildMI(MBB, MBBI, DL, TII->get(M68k::RTS)); + } + } else { + assert(false && "Oh really? You need to pop that much?"); + // RTD can only handle immediates as big as 2**16-1. If we need to pop + // off bytes before the return address, we must do it manually. + // + // BuildMI(MBB, MBBI, DL, TII->get(M68k::POP32r)).addReg(M68k::ECX, + // RegState::Define); FL->emitSPUpdate(MBB, MBBI, StackAdj, + // #<{(|InEpilogue=|)}>#true); BuildMI(MBB, MBBI, DL, + // TII->get(M68k::PUSH32r)).addReg(M68k::ECX); MIB = BuildMI(MBB, + // MBBI, DL, TII->get(M68k::RETL)); + } + + // ??? The rest can be ignored? + // for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I) + // MIB.addOperand(MBBI->getOperand(I)); + MBB.erase(MBBI); + return true; + } + } + llvm_unreachable("Previous switch has a fallthrough?"); +} + +/// Expand all pseudo instructions contained in \p MBB. +/// \returns true if any expansion occurred for \p MBB. +bool M68kExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) { + bool Modified = false; + + // MBBI may be invalidated by the expansion. 
+ MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); + while (MBBI != E) { + MachineBasicBlock::iterator NMBBI = std::next(MBBI); + Modified |= ExpandMI(MBB, MBBI); + MBBI = NMBBI; + } + + return Modified; +} + +bool M68kExpandPseudo::runOnMachineFunction(MachineFunction &MF) { + STI = &static_cast(MF.getSubtarget()); + TII = STI->getInstrInfo(); + TRI = STI->getRegisterInfo(); + MFI = MF.getInfo(); + FL = STI->getFrameLowering(); + + bool Modified = false; + for (MachineBasicBlock &MBB : MF) + Modified |= ExpandMBB(MBB); + return Modified; +} + +/// Returns an instance of the pseudo instruction expansion pass. +FunctionPass *llvm::createM68kExpandPseudoPass() { + return new M68kExpandPseudo(); +} Index: llvm/lib/Target/M68k/M68kFrameLowering.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kFrameLowering.h @@ -0,0 +1,181 @@ +//===- M68kFrameLowering.h - Define frame lowering for M68k -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the M68k declaration of TargetFrameLowering class. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KFRAMELOWERING_H +#define LLVM_LIB_TARGET_M68K_M68KFRAMELOWERING_H + +#include "M68k.h" + +#include "llvm/CodeGen/TargetFrameLowering.h" + +namespace llvm { +class MachineInstrBuilder; +class MCCFIInstruction; +class M68kSubtarget; +class M68kRegisterInfo; +struct Align; + +class M68kFrameLowering : public TargetFrameLowering { + // Cached subtarget predicates. + const M68kSubtarget &STI; + const TargetInstrInfo &TII; + const M68kRegisterInfo *TRI; + + /// Stack slot size in bytes. 
+ unsigned SlotSize; + + unsigned StackPtr; + + /// If we're forcing a stack realignment we can't rely on just the frame + /// info, we need to know the ABI stack alignment as well in case we have a + /// call out. Otherwise just make sure we have some alignment - we'll go + /// with the minimum SlotSize. + uint64_t calculateMaxStackAlign(const MachineFunction &MF) const; + + /// Adjusts the stack pointer using LEA, SUB, or ADD. + MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, int64_t Offset, + bool InEpilogue) const; + + /// Aligns the stack pointer by ANDing it with -MaxAlign. + void BuildStackAlignAND(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, const DebugLoc &DL, + unsigned Reg, uint64_t MaxAlign) const; + + /// Wraps up getting a CFI index and building a MachineInstr for it. + void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, const MCCFIInstruction &CFIInst) const; + + void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + const DebugLoc &DL) const; + + unsigned getPSPSlotOffsetFromSP(const MachineFunction &MF) const; + +public: + explicit M68kFrameLowering(const M68kSubtarget &sti, Align Alignment); + + static const M68kFrameLowering *create(const M68kSubtarget &ST); + + /// This method is called during prolog/epilog code insertion to eliminate + /// call frame setup and destroy pseudo instructions (but only if the Target + /// is using them). It is responsible for eliminating these instructions, + /// replacing them with concrete instructions. This method need only be + /// implemented if using call frame setup/destroy pseudo instructions. + /// Returns an iterator pointing to the instruction after the replaced one. 
+  MachineBasicBlock::iterator
+  eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI) const override;
+
+  /// Insert prolog code into the function.
+  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+  /// Insert epilog code into the function.
+  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+  /// This method determines which of the registers reported by
+  /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
+  /// The default implementation checks populates the \p SavedRegs bitset with
+  /// all registers which are modified in the function, targets may override
+  /// this function to save additional registers.
+  /// This method also sets up the register scavenger ensuring there is a free
+  /// register or a frameindex available.
+  void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+                            RegScavenger *RS = nullptr) const override;
+
+  /// Allows target to override spill slot assignment logic. If implemented,
+  /// assignCalleeSavedSpillSlots() should assign frame slots to all CSI
+  /// entries and return true. If this method returns false, spill slots will
+  /// be assigned using generic implementation. assignCalleeSavedSpillSlots()
+  /// may add, delete or rearrange elements of CSI.
+  bool
+  assignCalleeSavedSpillSlots(MachineFunction &MF,
+                              const TargetRegisterInfo *TRI,
+                              std::vector<CalleeSavedInfo> &CSI) const override;
+
+  /// Issues instruction(s) to spill all callee saved registers and returns
+  /// true if it isn't possible / profitable to do so by issuing a series of
+  /// store instructions via storeRegToStackSlot(). Returns false otherwise.
+  bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MI,
+                                 ArrayRef<CalleeSavedInfo> CSI,
+                                 const TargetRegisterInfo *TRI) const override;
+
+  /// Issues instruction(s) to restore all callee saved registers and returns
+  /// true if it isn't possible / profitable to do so by issuing a series of
+  /// load instructions via loadRegToStackSlot(). Returns false otherwise.
+  bool
+  restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MI,
+                              MutableArrayRef<CalleeSavedInfo> CSI,
+                              const TargetRegisterInfo *TRI) const override;
+
+  /// Return true if the specified function should have a dedicated frame
+  /// pointer register. This is true if the function has variable sized
+  /// allocas, if it needs dynamic stack realignment, if frame pointer
+  /// elimination is disabled, or if the frame address is taken.
+  bool hasFP(const MachineFunction &MF) const override;
+
+  /// Under normal circumstances, when a frame pointer is not required, we
+  /// reserve argument space for call sites in the function immediately on
+  /// entry to the current function. This eliminates the need for add/sub sp
+  /// brackets around call sites. Returns true if the call frame is included as
+  /// part of the stack frame.
+  bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+  /// If there is a reserved call frame, the call frame pseudos can be
+  /// simplified. Having a FP, as in the default implementation, is not
+  /// sufficient here since we can't always use it. Use a more nuanced
+  /// condition.
+  bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
+
+  // Do we need to perform FI resolution for this function. Normally, this is
+  // required only when the function has any stack objects. However, FI
+  // resolution actually has another job, not apparent from the title - it
+  // resolves callframe setup/destroy that were not simplified earlier.
+  //
+  // So, this is required for M68k functions that have push sequences even
+  // when there are no stack objects.
+  bool needsFrameIndexResolution(const MachineFunction &MF) const override;
+
+  /// This method should return the base register and offset used to reference
+  /// a frame index location. The offset is returned directly, and the base
+  /// register is returned via FrameReg.
+  StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
+                                     Register &FrameReg) const override;
+
+  /// Check the instruction before/after the passed instruction. If
+  /// it is an ADD/SUB/LEA instruction it is deleted argument and the
+  /// stack adjustment is returned as a positive value for ADD/LEA and
+  /// a negative for SUB.
+  int mergeSPUpdates(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+                     bool doMergeWithPrevious) const;
+
+  /// Emit a series of instructions to increment / decrement the stack
+  /// pointer by a constant value.
+  void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+                    int64_t NumBytes, bool InEpilogue) const;
+
+  /// TODO #39
+  /// Order the symbols in the local stack.
+  /// We want to place the local stack objects in some sort of sensible order.
+  /// The heuristic we use is to try and pack them according to static number
+  /// of uses and size in order to minimize code size.
+  // void orderFrameObjects(const MachineFunction &MF,
+  //                        SmallVectorImpl<int> &ObjectsToAllocate) const
+  //     override;
+};
+} // namespace llvm
+
+#endif
Index: llvm/lib/Target/M68k/M68kFrameLowering.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kFrameLowering.cpp
@@ -0,0 +1,921 @@
+//===-- M68kFrameLowering.cpp - M68k Frame Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the M68k implementation of TargetFrameLowering class. +/// +//===----------------------------------------------------------------------===// + +#include "M68kFrameLowering.h" + +#include "M68kInstrBuilder.h" +#include "M68kInstrInfo.h" +#include "M68kMachineFunction.h" +#include "M68kSubtarget.h" + +#include "llvm/ADT/SmallSet.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" + +using namespace llvm; + +M68kFrameLowering::M68kFrameLowering(const M68kSubtarget &STI, Align Alignment) + : TargetFrameLowering(StackGrowsDown, Alignment, -4), STI(STI), + TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) { + SlotSize = STI.getSlotSize(); + StackPtr = TRI->getStackRegister(); +} + +bool M68kFrameLowering::hasFP(const MachineFunction &MF) const { + const MachineFrameInfo &MFI = MF.getFrameInfo(); + const TargetRegisterInfo *TRI = STI.getRegisterInfo(); + + return MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || + TRI->needsStackRealignment(MF); +} + +// FIXME #6 not only pushes.... 
+bool M68kFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+  return !MF.getFrameInfo().hasVarSizedObjects() &&
+         !MF.getInfo<M68kMachineFunctionInfo>()->getHasPushSequences();
+}
+
+bool M68kFrameLowering::canSimplifyCallFramePseudos(
+    const MachineFunction &MF) const {
+  return hasReservedCallFrame(MF) ||
+         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
+         TRI->hasBasePointer(MF);
+}
+
+bool M68kFrameLowering::needsFrameIndexResolution(
+    const MachineFunction &MF) const {
+  return MF.getFrameInfo().hasStackObjects() ||
+         MF.getInfo<M68kMachineFunctionInfo>()->getHasPushSequences();
+}
+
+// NOTE: this only has a subset of the full frame index logic. In
+// particular, the FI < 0 and AfterFPPop logic is handled in
+// M68kRegisterInfo::eliminateFrameIndex, but not here. Possibly
+// (probably?) it should be moved into here.
+StackOffset
+M68kFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+                                          Register &FrameReg) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // We can't calculate offset from frame pointer if the stack is realigned,
+  // so enforce usage of stack/base pointer. The base pointer is used when we
+  // have dynamic allocas in addition to dynamic realignment.
+  if (TRI->hasBasePointer(MF))
+    FrameReg = TRI->getBaseRegister();
+  else if (TRI->needsStackRealignment(MF))
+    FrameReg = TRI->getStackRegister();
+  else
+    FrameReg = TRI->getFrameRegister(MF);
+
+  // Offset will hold the offset from the stack pointer at function entry to the
+  // object.
+  // We need to factor in additional offsets applied during the prologue to the
+  // frame, base, and stack pointer depending on which is used.
+  int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
+  const M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  uint64_t StackSize = MFI.getStackSize();
+  bool HasFP = hasFP(MF);
+
+  if (TRI->hasBasePointer(MF)) {
+    assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
+    if (FI < 0) {
+      // Skip the saved FP.
+ return StackOffset::getFixed(Offset + SlotSize); + } else { + assert((-(Offset + StackSize)) % MFI.getObjectAlign(FI).value() == 0); + return StackOffset::getFixed(Offset + StackSize); + } + } else if (TRI->needsStackRealignment(MF)) { + if (FI < 0) { + // Skip the saved FP. + return StackOffset::getFixed(Offset + SlotSize); + } else { + assert((-(Offset + StackSize)) % MFI.getObjectAlign(FI).value() == 0); + return StackOffset::getFixed(Offset + StackSize); + } + // FIXME: #7 Support tail calls + } else { + if (!HasFP) + return StackOffset::getFixed(Offset + StackSize); + + // Skip the saved FP. + Offset += SlotSize; + + // Skip the RETADDR move area + int TailCallReturnAddrDelta = MMFI->getTCReturnAddrDelta(); + if (TailCallReturnAddrDelta < 0) + Offset -= TailCallReturnAddrDelta; + } + + return StackOffset::getFixed(Offset); +} + +static unsigned getSUBriOpcode(int64_t Imm) { return M68k::SUB32ri; } + +static unsigned getADDriOpcode(int64_t Imm) { return M68k::ADD32ri; } + +static unsigned getSUBrrOpcode() { return M68k::SUB32rr; } + +static unsigned getADDrrOpcode() { return M68k::ADD32rr; } + +static unsigned getANDriOpcode(int64_t Imm) { return M68k::AND32di; } + +static unsigned getLEArOpcode() { return M68k::LEA32p; } + +static unsigned getMOVrrOpcode() { return M68k::MOV32rr; } + +/// Return a caller-saved register that isn't live +/// when it reaches the "return" instruction. We can then pop a stack object +/// to this register without worry about clobbering it. 
+static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
+                                       MachineBasicBlock::iterator &MBBI,
+                                       const M68kRegisterInfo *TRI) {
+  const MachineFunction *MF = MBB.getParent();
+  if (MF->callsEHReturn())
+    return 0;
+
+  const TargetRegisterClass &AvailableRegs = *TRI->getRegsForTailCall(*MF);
+
+  if (MBBI == MBB.end())
+    return 0;
+
+  switch (MBBI->getOpcode()) {
+  default:
+    return 0;
+  case TargetOpcode::PATCHABLE_RET:
+  case M68k::RET: {
+    SmallSet<uint16_t, 8> Uses;
+
+    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MBBI->getOperand(i);
+      if (!MO.isReg() || MO.isDef())
+        continue;
+      unsigned Reg = MO.getReg();
+      if (!Reg)
+        continue;
+      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+        Uses.insert(*AI);
+    }
+
+    for (auto CS : AvailableRegs)
+      if (!Uses.count(CS))
+        return CS;
+  }
+  }
+
+  return 0;
+}
+
+static bool isRegLiveIn(MachineBasicBlock &MBB, unsigned Reg) {
+  return llvm::any_of(MBB.liveins(),
+                      [Reg](MachineBasicBlock::RegisterMaskPair RegMask) {
+                        return RegMask.PhysReg == Reg;
+                      });
+}
+
+uint64_t
+M68kFrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  uint64_t MaxAlign = MFI.getMaxAlign().value(); // Desired stack alignment.
+  unsigned StackAlign = getStackAlignment();     // ABI alignment
+  if (MF.getFunction().hasFnAttribute("stackrealign")) {
+    if (MFI.hasCalls())
+      MaxAlign = (StackAlign > MaxAlign) ?
StackAlign : MaxAlign;
+    else if (MaxAlign < SlotSize)
+      MaxAlign = SlotSize;
+  }
+  return MaxAlign;
+}
+
+void M68kFrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MBBI,
+                                           const DebugLoc &DL, unsigned Reg,
+                                           uint64_t MaxAlign) const {
+  uint64_t Val = -MaxAlign;
+  unsigned AndOp = getANDriOpcode(Val);
+  unsigned MovOp = getMOVrrOpcode();
+
+  // This function is normally used with SP which is Address Register, but AND,
+  // or any other logical instructions in M68k do not support ARs so we need
+  // to use a temp Data Register to perform the op.
+  unsigned Tmp = M68k::D0;
+
+  BuildMI(MBB, MBBI, DL, TII.get(MovOp), Tmp)
+      .addReg(Reg)
+      .setMIFlag(MachineInstr::FrameSetup);
+
+  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Tmp)
+                         .addReg(Tmp)
+                         .addImm(Val)
+                         .setMIFlag(MachineInstr::FrameSetup);
+
+  // The CCR implicit def is dead.
+  MI->getOperand(3).setIsDead();
+
+  BuildMI(MBB, MBBI, DL, TII.get(MovOp), Reg)
+      .addReg(Tmp)
+      .setMIFlag(MachineInstr::FrameSetup);
+}
+
+MachineBasicBlock::iterator M68kFrameLowering::eliminateCallFramePseudoInstr(
+    MachineFunction &MF, MachineBasicBlock &MBB,
+    MachineBasicBlock::iterator I) const {
+  bool reserveCallFrame = hasReservedCallFrame(MF);
+  unsigned Opcode = I->getOpcode();
+  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
+  DebugLoc DL = I->getDebugLoc();
+  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
+  uint64_t InternalAmt = (isDestroy && Amount) ? I->getOperand(1).getImm() : 0;
+  I = MBB.erase(I);
+
+  if (!reserveCallFrame) {
+    // If the stack pointer can be changed after prologue, turn the
+    // adjcallstackup instruction into a 'sub %SP, <amt>' and the
+    // adjcallstackdown instruction into 'add %SP, <amt>'
+
+    // We need to keep the stack aligned properly. To do this, we round the
+    // amount of space needed for the outgoing arguments up to the next
+    // alignment boundary.
+    unsigned StackAlign = getStackAlignment();
+    Amount = alignTo(Amount, StackAlign);
+
+    MachineModuleInfo &MMI = MF.getMMI();
+    const auto &Fn = MF.getFunction();
+    bool DwarfCFI = MMI.hasDebugInfo() || Fn.needsUnwindTableEntry();
+
+    // If we have any exception handlers in this function, and we adjust
+    // the SP before calls, we may need to indicate this to the unwinder
+    // using GNU_ARGS_SIZE. Note that this may be necessary even when
+    // Amount == 0, because the preceding function may have set a non-0
+    // GNU_ARGS_SIZE.
+    // TODO: We don't need to reset this between subsequent functions,
+    // if it didn't change.
+    bool HasDwarfEHHandlers = !MF.getLandingPads().empty();
+
+    if (HasDwarfEHHandlers && !isDestroy &&
+        MF.getInfo<M68kMachineFunctionInfo>()->getHasPushSequences()) {
+      BuildCFI(MBB, I, DL,
+               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
+    }
+
+    if (Amount == 0)
+      return I;
+
+    // Factor out the amount that gets handled inside the sequence
+    // (Pushes of argument for frame setup, callee pops for frame destroy)
+    Amount -= InternalAmt;
+
+    // TODO: This is needed only if we require precise CFA.
+    // If this is a callee-pop calling convention, emit a CFA adjust for
+    // the amount the callee popped.
+    if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
+      BuildCFI(MBB, I, DL,
+               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
+
+    // Add Amount to SP to destroy a frame, or subtract to setup.
+    int64_t StackAdjustment = isDestroy ? Amount : -Amount;
+    int64_t CfaAdjustment = -StackAdjustment;
+
+    if (StackAdjustment) {
+      // Merge with any previous or following adjustment instruction. Note: the
+      // instructions merged with here do not have CFI, so their stack
+      // adjustments do not feed into CfaAdjustment.
+ StackAdjustment += mergeSPUpdates(MBB, I, true); + StackAdjustment += mergeSPUpdates(MBB, I, false); + + if (StackAdjustment) { + BuildStackAdjustment(MBB, I, DL, StackAdjustment, false); + } + } + + if (DwarfCFI && !hasFP(MF)) { + // If we don't have FP, but need to generate unwind information, + // we need to set the correct CFA offset after the stack adjustment. + // How much we adjust the CFA offset depends on whether we're emitting + // CFI only for EH purposes or for debugging. EH only requires the CFA + // offset to be correct at each call site, while for debugging we want + // it to be more precise. + + // TODO: When not using precise CFA, we also need to adjust for the + // InternalAmt here. + if (CfaAdjustment) { + BuildCFI( + MBB, I, DL, + MCCFIInstruction::createAdjustCfaOffset(nullptr, CfaAdjustment)); + } + } + + return I; + } + + if (isDestroy && InternalAmt) { + // If we are performing frame pointer elimination and if the callee pops + // something off the stack pointer, add it back. We do this until we have + // more advanced stack pointer tracking ability. + // We are not tracking the stack pointer adjustment by the callee, so make + // sure we restore the stack pointer immediately after the call, there may + // be spill code inserted between the CALL and ADJCALLSTACKUP instructions. + MachineBasicBlock::iterator CI = I; + MachineBasicBlock::iterator B = MBB.begin(); + while (CI != B && !std::prev(CI)->isCall()) + --CI; + BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false); + } + + return I; +} + +/// Emit a series of instructions to increment / decrement the stack pointer by +/// a constant value. +void M68kFrameLowering::emitSPUpdate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator &MBBI, + int64_t NumBytes, bool InEpilogue) const { + bool isSub = NumBytes < 0; + uint64_t Offset = isSub ? 
-NumBytes : NumBytes; + + uint64_t Chunk = (1LL << 31) - 1; + DebugLoc DL = MBB.findDebugLoc(MBBI); + + while (Offset) { + if (Offset > Chunk) { + // Rather than emit a long series of instructions for large offsets, + // load the offset into a register and do one sub/add + unsigned Reg = 0; + + if (isSub && !isRegLiveIn(MBB, M68k::D0)) + Reg = (unsigned)(M68k::D0); + else + Reg = findDeadCallerSavedReg(MBB, MBBI, TRI); + + if (Reg) { + unsigned Opc = M68k::MOV32ri; + BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg).addImm(Offset); + Opc = isSub ? getSUBrrOpcode() : getADDrrOpcode(); + MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) + .addReg(StackPtr) + .addReg(Reg); + // ??? still no CCR + MI->getOperand(3).setIsDead(); // The CCR implicit def is dead. + Offset = 0; + continue; + } + } + + uint64_t ThisVal = std::min(Offset, Chunk); + + MachineInstrBuilder MI = BuildStackAdjustment( + MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue); + if (isSub) + MI.setMIFlag(MachineInstr::FrameSetup); + else + MI.setMIFlag(MachineInstr::FrameDestroy); + + Offset -= ThisVal; + } +} + +int M68kFrameLowering::mergeSPUpdates(MachineBasicBlock &MBB, + MachineBasicBlock::iterator &MBBI, + bool doMergeWithPrevious) const { + if ((doMergeWithPrevious && MBBI == MBB.begin()) || + (!doMergeWithPrevious && MBBI == MBB.end())) + return 0; + + MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI; + MachineBasicBlock::iterator NI = + doMergeWithPrevious ? nullptr : std::next(MBBI); + unsigned Opc = PI->getOpcode(); + int Offset = 0; + + if (!doMergeWithPrevious && NI != MBB.end() && + NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) { + // Don't merge with the next instruction if it has CFI. 
+ return Offset; + } + + if (Opc == M68k::ADD32ri && PI->getOperand(0).getReg() == StackPtr) { + assert(PI->getOperand(1).getReg() == StackPtr); + Offset += PI->getOperand(2).getImm(); + MBB.erase(PI); + if (!doMergeWithPrevious) + MBBI = NI; + // TODO #40 check this + // } else if (Opc == M68k::LEA32p && + // PI->getOperand(0).getReg() == StackPtr && + // PI->getOperand(2).getReg() == StackPtr) { + // Offset += PI->getOperand(1).getImm(); + // MBB.erase(PI); + // if (!doMergeWithPrevious) MBBI = NI; + } else if (Opc == M68k::SUB32ri && PI->getOperand(0).getReg() == StackPtr) { + assert(PI->getOperand(1).getReg() == StackPtr); + Offset -= PI->getOperand(2).getImm(); + MBB.erase(PI); + if (!doMergeWithPrevious) + MBBI = NI; + } + + return Offset; +} + +MachineInstrBuilder M68kFrameLowering::BuildStackAdjustment( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, int64_t Offset, bool InEpilogue) const { + assert(Offset != 0 && "zero offset stack adjustment requested"); + + // TODO #8 in the original code for M68k Atom uses lea to adjust stack as an + // optimization, can be be this applied for M68k? + + bool IsSub = Offset < 0; + uint64_t AbsOffset = IsSub ? -Offset : Offset; + unsigned Opc = IsSub ? getSUBriOpcode(AbsOffset) : getADDriOpcode(AbsOffset); + + MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) + .addReg(StackPtr) + .addImm(AbsOffset); + // FIXME #9 ATM there is no CCR in these inst + MI->getOperand(3).setIsDead(); // The CCR implicit def is dead. 
+  return MI;
+}
+
+void M68kFrameLowering::BuildCFI(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MBBI,
+                                 const DebugLoc &DL,
+                                 const MCCFIInstruction &CFIInst) const {
+  MachineFunction &MF = *MBB.getParent();
+  unsigned CFIIndex = MF.addFrameInst(CFIInst);
+  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex);
+}
+
+void M68kFrameLowering::emitCalleeSavedFrameMoves(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    const DebugLoc &DL) const {
+  MachineFunction &MF = *MBB.getParent();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  MachineModuleInfo &MMI = MF.getMMI();
+  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+
+  // Add callee saved registers to move list.
+  const auto &CSI = MFI.getCalleeSavedInfo();
+  if (CSI.empty())
+    return;
+
+  // Calculate offsets.
+  for (const auto &I : CSI) {
+    int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
+    unsigned Reg = I.getReg();
+
+    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+    BuildCFI(MBB, MBBI, DL,
+             MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
+  }
+}
+
+void M68kFrameLowering::emitPrologue(MachineFunction &MF,
+                                     MachineBasicBlock &MBB) const {
+  assert(&STI == &MF.getSubtarget<M68kSubtarget>() &&
+         "MF used frame lowering for wrong subtarget");
+
+  MachineBasicBlock::iterator MBBI = MBB.begin();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const auto &Fn = MF.getFunction();
+  MachineModuleInfo &MMI = MF.getMMI();
+  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
+  uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
+ bool HasFP = hasFP(MF); + bool NeedsDwarfCFI = MMI.hasDebugInfo() || Fn.needsUnwindTableEntry(); + unsigned FramePtr = TRI->getFrameRegister(MF); + const unsigned MachineFramePtr = FramePtr; + unsigned BasePtr = TRI->getBaseRegister(); + + // Debug location must be unknown since the first debug location is used + // to determine the end of the prologue. + DebugLoc DL; + + // Add RETADDR move area to callee saved frame size. + int TailCallReturnAddrDelta = MMFI->getTCReturnAddrDelta(); + + if (TailCallReturnAddrDelta < 0) { + MMFI->setCalleeSavedFrameSize(MMFI->getCalleeSavedFrameSize() - + TailCallReturnAddrDelta); + } + + // Insert stack pointer adjustment for later moving of return addr. Only + // applies to tail call optimized functions where the callee argument stack + // size is bigger than the callers. + if (TailCallReturnAddrDelta < 0) { + BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta, + /*InEpilogue=*/false) + .setMIFlag(MachineInstr::FrameSetup); + } + + // Mapping for machine moves: + // + // DST: VirtualFP AND + // SRC: VirtualFP => DW_CFA_def_cfa_offset + // ELSE => DW_CFA_def_cfa + // + // SRC: VirtualFP AND + // DST: Register => DW_CFA_def_cfa_register + // + // ELSE + // OFFSET < 0 => DW_CFA_offset_extended_sf + // REG < 64 => DW_CFA_offset + Reg + // ELSE => DW_CFA_offset_extended + + uint64_t NumBytes = 0; + int stackGrowth = -SlotSize; + + if (HasFP) { + // Calculate required stack adjustment. + uint64_t FrameSize = StackSize - SlotSize; + // If required, include space for extra hidden slot for stashing base + // pointer. + if (MMFI->getRestoreBasePointer()) + FrameSize += SlotSize; + + NumBytes = FrameSize - MMFI->getCalleeSavedFrameSize(); + + // Callee-saved registers are pushed on stack before the stack is realigned. 
+ if (TRI->needsStackRealignment(MF)) + NumBytes = alignTo(NumBytes, MaxAlign); + + // Get the offset of the stack slot for the FP register, which is + // guaranteed to be the last slot by processFunctionBeforeFrameFinalized. + // Update the frame offset adjustment. + MFI.setOffsetAdjustment(-NumBytes); + + // Save FP into the appropriate stack slot. + BuildMI(MBB, MBBI, DL, TII.get(M68k::PUSH32r)) + .addReg(MachineFramePtr, RegState::Kill) + .setMIFlag(MachineInstr::FrameSetup); + + if (NeedsDwarfCFI) { + // Mark the place where FP was saved. + // Define the current CFA rule to use the provided offset. + assert(StackSize); + BuildCFI(MBB, MBBI, DL, + MCCFIInstruction::cfiDefCfaOffset(nullptr, 2 * stackGrowth)); + + // Change the rule for the FramePtr to be an "offset" rule. + int DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); + assert(DwarfFramePtr > 0); + BuildCFI(MBB, MBBI, DL, + MCCFIInstruction::createOffset(nullptr, DwarfFramePtr, + 2 * stackGrowth)); + } + + // Update FP with the new base value. + BuildMI(MBB, MBBI, DL, TII.get(M68k::MOV32aa), FramePtr) + .addReg(StackPtr) + .setMIFlag(MachineInstr::FrameSetup); + + if (NeedsDwarfCFI) { + // Mark effective beginning of when frame pointer becomes valid. + // Define the current CFA to use the FP register. + unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true); + BuildCFI(MBB, MBBI, DL, + MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr)); + } + + // Mark the FramePtr as live-in in every block. Don't do this again for + // funclet prologues. + for (MachineBasicBlock &EveryMBB : MF) + EveryMBB.addLiveIn(MachineFramePtr); + } else { + NumBytes = StackSize - MMFI->getCalleeSavedFrameSize(); + } + + // Skip the callee-saved push instructions. 
+ bool PushedRegs = false; + int StackOffset = 2 * stackGrowth; + + while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) && + MBBI->getOpcode() == M68k::PUSH32r) { + PushedRegs = true; + ++MBBI; + + if (!HasFP && NeedsDwarfCFI) { + // Mark callee-saved push instruction. + // Define the current CFA rule to use the provided offset. + assert(StackSize); + BuildCFI(MBB, MBBI, DL, + MCCFIInstruction::cfiDefCfaOffset(nullptr, StackOffset)); + StackOffset += stackGrowth; + } + } + + // Realign stack after we pushed callee-saved registers (so that we'll be + // able to calculate their offsets from the frame pointer). + if (TRI->needsStackRealignment(MF)) { + assert(HasFP && "There should be a frame pointer if stack is realigned."); + BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign); + } + + // If there is an SUB32ri of SP immediately before this instruction, merge + // the two. This can be the case when tail call elimination is enabled and + // the callee has more arguments then the caller. + NumBytes -= mergeSPUpdates(MBB, MBBI, true); + + // Adjust stack pointer: ESP -= numbytes. + emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false); + + unsigned SPOrEstablisher = StackPtr; + + // If we need a base pointer, set it up here. It's whatever the value + // of the stack pointer is at this point. Any variable size objects + // will be allocated after this, so we can still use the base pointer + // to reference locals. + if (TRI->hasBasePointer(MF)) { + // Update the base pointer with the current stack pointer. + BuildMI(MBB, MBBI, DL, TII.get(M68k::MOV32aa), BasePtr) + .addReg(SPOrEstablisher) + .setMIFlag(MachineInstr::FrameSetup); + if (MMFI->getRestoreBasePointer()) { + // Stash value of base pointer. Saving SP instead of FP shortens + // dependence chain. Used by SjLj EH. 
+      unsigned Opm = M68k::MOV32ja;
+      M68k::addRegIndirectWithDisp(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
+                                   FramePtr, true,
+                                   MMFI->getRestoreBasePointerOffset())
+          .addReg(SPOrEstablisher)
+          .setMIFlag(MachineInstr::FrameSetup);
+    }
+  }
+
+  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
+    // Mark end of stack pointer adjustment.
+    if (!HasFP && NumBytes) {
+      // Define the current CFA rule to use the provided offset.
+      assert(StackSize);
+      BuildCFI(
+          MBB, MBBI, DL,
+          MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackSize + stackGrowth));
+    }
+
+    // Emit DWARF info specifying the offsets of the callee-saved registers.
+    if (PushedRegs)
+      emitCalleeSavedFrameMoves(MBB, MBBI, DL);
+  }
+
+  // TODO #10 interrupts...
+  // M68k Interrupt handling function cannot assume anything about the
+  // direction flag (DF in CCR register). Clear this flag by creating "cld"
+  // instruction in each prologue of interrupt handler function.
+  //
+  // FIXME: Create "cld" instruction only in these cases:
+  //  1. The interrupt handling function uses any of the "rep" instructions.
+  //  2. Interrupt handling function calls another function.
+  //
+  // if (Fn.getCallingConv() == CallingConv::M68k_INTR)
+  //   BuildMI(MBB, MBBI, DL, TII.get(M68k::CLD))
+  //       .setMIFlag(MachineInstr::FrameSetup);
+}
+
+static bool isTailCallOpcode(unsigned Opc) {
+  return Opc == M68k::TCRETURNj || Opc == M68k::TCRETURNq;
+}
+
+void M68kFrameLowering::emitEpilogue(MachineFunction &MF,
+                                     MachineBasicBlock &MBB) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+  Optional<unsigned> RetOpcode;
+  if (MBBI != MBB.end())
+    RetOpcode = MBBI->getOpcode();
+  DebugLoc DL;
+  if (MBBI != MBB.end())
+    DL = MBBI->getDebugLoc();
+  unsigned FramePtr = TRI->getFrameRegister(MF);
+  unsigned MachineFramePtr = FramePtr;
+
+  // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI.getStackSize(); + uint64_t MaxAlign = calculateMaxStackAlign(MF); + unsigned CSSize = MMFI->getCalleeSavedFrameSize(); + uint64_t NumBytes = 0; + + if (hasFP(MF)) { + // Calculate required stack adjustment. + uint64_t FrameSize = StackSize - SlotSize; + NumBytes = FrameSize - CSSize; + + // Callee-saved registers were pushed on stack before the stack was + // realigned. + if (TRI->needsStackRealignment(MF)) + NumBytes = alignTo(FrameSize, MaxAlign); + + // Pop FP. + BuildMI(MBB, MBBI, DL, TII.get(M68k::POP32r), MachineFramePtr) + .setMIFlag(MachineInstr::FrameDestroy); + } else { + NumBytes = StackSize - CSSize; + } + + // Skip the callee-saved pop instructions. + while (MBBI != MBB.begin()) { + MachineBasicBlock::iterator PI = std::prev(MBBI); + unsigned Opc = PI->getOpcode(); + + if ((Opc != M68k::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) && + Opc != M68k::DBG_VALUE && !PI->isTerminator()) + break; + + --MBBI; + } + MachineBasicBlock::iterator FirstCSPop = MBBI; + + if (MBBI != MBB.end()) + DL = MBBI->getDebugLoc(); + + // If there is an ADD32ri or SUB32ri of SP immediately before this + // instruction, merge the two instructions. + if (NumBytes || MFI.hasVarSizedObjects()) + NumBytes += mergeSPUpdates(MBB, MBBI, true); + + // If dynamic alloca is used, then reset SP to point to the last callee-saved + // slot before popping them off! Same applies for the case, when stack was + // realigned. Don't do this if this was a funclet epilogue, since the funclets + // will not do realignment or dynamic stack allocation. + if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects())) { + if (TRI->needsStackRealignment(MF)) + MBBI = FirstCSPop; + uint64_t LEAAmount = -CSSize; + + // 'move %FramePtr, SP' will not be recognized as an epilogue sequence. + // However, we may use this sequence if we have a frame pointer because the + // effects of the prologue can safely be undone. 
+    if (LEAAmount != 0) {
+      unsigned Opc = getLEArOpcode();
+      M68k::addRegIndirectWithDisp(
+          BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), FramePtr, false,
+          LEAAmount);
+      --MBBI;
+    } else {
+      unsigned Opc = (M68k::MOV32rr);
+      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr).addReg(FramePtr);
+      --MBBI;
+    }
+  } else if (NumBytes) {
+    // Adjust stack pointer back: SP += numbytes.
+    emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
+    --MBBI;
+  }
+
+  if (!RetOpcode || !isTailCallOpcode(*RetOpcode)) {
+    // Add the return addr area delta back since we are not tail calling.
+    int Offset = -1 * MMFI->getTCReturnAddrDelta();
+    assert(Offset >= 0 && "TCDelta should never be positive");
+    if (Offset) {
+      MBBI = MBB.getFirstTerminator();
+
+      // Check for possible merge with preceding ADD instruction.
+      Offset += mergeSPUpdates(MBB, MBBI, true);
+      emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+    }
+  }
+}
+
+void M68kFrameLowering::determineCalleeSaves(MachineFunction &MF,
+                                             BitVector &SavedRegs,
+                                             RegScavenger *RS) const {
+  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  M68kMachineFunctionInfo *M68kFI = MF.getInfo<M68kMachineFunctionInfo>();
+  int64_t TailCallReturnAddrDelta = M68kFI->getTCReturnAddrDelta();
+
+  if (TailCallReturnAddrDelta < 0) {
+    // create RETURNADDR area
+    //   arg
+    //   arg
+    //   RETADDR
+    //   { ...
+    //     RETADDR area
+    //     ...
+    //   }
+    //   [FP]
+    MFI.CreateFixedObject(-TailCallReturnAddrDelta,
+                          TailCallReturnAddrDelta - SlotSize, true);
+  }
+
+  // Spill the BasePtr if it's used.
+  if (TRI->hasBasePointer(MF)) {
+    SavedRegs.set(TRI->getBaseRegister());
+  }
+}
+
+bool M68kFrameLowering::assignCalleeSavedSpillSlots(
+    MachineFunction &MF, const TargetRegisterInfo *TRI,
+    std::vector<CalleeSavedInfo> &CSI) const {
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  M68kMachineFunctionInfo *M68kFI = MF.getInfo<M68kMachineFunctionInfo>();
+
+  int SpillSlotOffset = getOffsetOfLocalArea() + M68kFI->getTCReturnAddrDelta();
+
+  if (hasFP(MF)) {
+    // emitPrologue always spills frame register the first thing.
+    SpillSlotOffset -= SlotSize;
+    MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+
+    // Since emitPrologue and emitEpilogue will handle spilling and restoring of
+    // the frame register, we can delete it from CSI list and not have to worry
+    // about avoiding it later.
+    unsigned FPReg = TRI->getFrameRegister(MF);
+    for (unsigned i = 0, e = CSI.size(); i < e; ++i) {
+      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
+        CSI.erase(CSI.begin() + i);
+        break;
+      }
+    }
+  }
+
+  // The rest is fine
+  return false;
+}
+
+bool M68kFrameLowering::spillCalleeSavedRegisters(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+  auto &MRI = *static_cast<const M68kRegisterInfo *>(TRI);
+  auto DL = MBB.findDebugLoc(MI);
+
+  int FI = 0;
+  unsigned Mask = 0;
+  for (const auto &Info : CSI) {
+    FI = std::max(FI, Info.getFrameIdx());
+    unsigned Reg = Info.getReg();
+    unsigned Shift = MRI.getSpillRegisterOrder(Reg);
+    Mask |= 1 << Shift;
+  }
+
+  auto I =
+      M68k::addFrameReference(BuildMI(MBB, MI, DL, TII.get(M68k::MOVM32pm)), FI)
+          .addImm(Mask)
+          .setMIFlag(MachineInstr::FrameSetup);
+
+  // Append implicit registers and mem locations
+  const MachineFunction &MF = *MBB.getParent();
+  const MachineRegisterInfo &RI = MF.getRegInfo();
+  for (const auto &Info : CSI) {
+    unsigned Reg = Info.getReg();
+    bool isLiveIn = RI.isLiveIn(Reg);
+    if (!isLiveIn)
+      MBB.addLiveIn(Reg);
+    I.addReg(Reg, isLiveIn ?
RegState::Implicit : RegState::ImplicitKill);
+    M68k::addMemOperand(I, Info.getFrameIdx(), 0);
+  }
+
+  return true;
+}
+
+bool M68kFrameLowering::restoreCalleeSavedRegisters(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+  auto &MRI = *static_cast<const M68kRegisterInfo *>(TRI);
+  auto DL = MBB.findDebugLoc(MI);
+
+  int FI = 0;
+  unsigned Mask = 0;
+  for (const auto &Info : CSI) {
+    FI = std::max(FI, Info.getFrameIdx());
+    unsigned Reg = Info.getReg();
+    unsigned Shift = MRI.getSpillRegisterOrder(Reg);
+    Mask |= 1 << Shift;
+  }
+
+  auto I = M68k::addFrameReference(
+               BuildMI(MBB, MI, DL, TII.get(M68k::MOVM32mp)).addImm(Mask), FI)
+               .setMIFlag(MachineInstr::FrameDestroy);
+
+  // Append implicit registers and mem locations
+  for (const auto &Info : CSI) {
+    I.addReg(Info.getReg(), RegState::ImplicitDefine);
+    M68k::addMemOperand(I, Info.getFrameIdx(), 0);
+  }
+
+  return true;
+}
Index: llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
@@ -0,0 +1,891 @@
+//===- M68kISelDAGToDAG.cpp - M68k Dag to Dag Inst Selector -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines an instruction selector for the M68K target.
+/// +//===----------------------------------------------------------------------===// + +#include "M68k.h" + +#include "M68kMachineFunction.h" +#include "M68kRegisterInfo.h" +#include "M68kTargetMachine.h" + +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/IR/CFG.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "m68k-isel" + +namespace { + +// For reference, the full order of operands for memory references is: +// (Operand), Displacement, Base, Index, Scale +struct M68kISelAddressMode { + enum AddrType { + ARI, // Address Register Indirect + ARIPI, // Address Register Indirect with Postincrement + ARIPD, // Address Register Indirect with Postdecrement + ARID, // Address Register Indirect with Displacement + ARII, // Address Register Indirect with Index + PCD, // Program Counter Indirect with Displacement + PCI, // Program Counter Indirect with Index + AL, // Absolute + } AM; + + enum { RegBase, FrameIndexBase } BaseType; + + int64_t Disp; + + // This is really a union, discriminated by BaseType! + SDValue BaseReg; + int BaseFrameIndex; + + SDValue IndexReg; + unsigned Scale; + + const GlobalValue *GV; + const Constant *CP; + const BlockAddress *BlockAddr; + const char *ES; + MCSymbol *MCSym; + int JT; + Align Alignment; // CP alignment. 
+ + unsigned char SymbolFlags; // M68kII::MO_* + + M68kISelAddressMode(AddrType AT) + : AM(AT), BaseType(RegBase), Disp(0), BaseFrameIndex(0), IndexReg(), + Scale(1), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr), + MCSym(nullptr), JT(-1), Alignment(), SymbolFlags(M68kII::MO_NO_FLAG) {} + + bool hasSymbolicDisplacement() const { + return GV != nullptr || CP != nullptr || ES != nullptr || + MCSym != nullptr || JT != -1 || BlockAddr != nullptr; + } + + bool hasBase() const { + return BaseType == FrameIndexBase || BaseReg.getNode() != nullptr; + } + + bool hasFrameIndex() const { return BaseType == FrameIndexBase; } + + bool hasBaseReg() const { + return BaseType == RegBase && BaseReg.getNode() != nullptr; + } + + bool hasIndexReg() const { + return BaseType == RegBase && IndexReg.getNode() != nullptr; + } + + /// True if address mode type supports displacement + bool isDispAddrType() const { + return AM == ARII || AM == PCI || AM == ARID || AM == PCD || AM == AL; + } + + unsigned getDispSize() const { + switch (AM) { + default: + return 0; + case ARII: + case PCI: + return 8; + // These two in the next chip generations can hold upto 32 bit + case ARID: + case PCD: + return 16; + case AL: + return 32; + } + } + + bool hasDisp() const { return getDispSize() != 0; } + bool isDisp8() const { return getDispSize() == 8; } + bool isDisp16() const { return getDispSize() == 16; } + bool isDisp32() const { return getDispSize() == 32; } + + /// Return true if this addressing mode is already PC-relative. 
+  bool isPCRelative() const {
+    if (BaseType != RegBase)
+      return false;
+    if (RegisterSDNode *RegNode =
+            dyn_cast_or_null<RegisterSDNode>(BaseReg.getNode()))
+      return RegNode->getReg() == M68k::PC;
+    return false;
+  }
+
+  void setBaseReg(SDValue Reg) {
+    BaseType = RegBase;
+    BaseReg = Reg;
+  }
+
+  void setIndexReg(SDValue Reg) { IndexReg = Reg; }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  void dump() {
+    dbgs() << "M68kISelAddressMode " << this;
+    dbgs() << "\nDisp: " << Disp;
+    dbgs() << ", BaseReg: ";
+    if (BaseReg.getNode())
+      BaseReg.getNode()->dump();
+    else
+      dbgs() << "null";
+    dbgs() << ", BaseFI: " << BaseFrameIndex;
+    dbgs() << ", IndexReg: ";
+    if (IndexReg.getNode()) {
+      IndexReg.getNode()->dump();
+    } else {
+      dbgs() << "null";
+      dbgs() << ", Scale: " << Scale;
+    }
+    dbgs() << '\n';
+  }
+#endif
+};
+} // end anonymous namespace
+
+namespace {
+
+class M68kDAGToDAGISel : public SelectionDAGISel {
+public:
+  explicit M68kDAGToDAGISel(M68kTargetMachine &TM)
+      : SelectionDAGISel(TM), Subtarget(nullptr) {}
+
+  StringRef getPassName() const override {
+    return "M68k DAG->DAG Pattern Instruction Selection";
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+  /// Keep a pointer to the M68kSubtarget around so that we can
+  /// make the right decision when generating code for different targets.
+  const M68kSubtarget *Subtarget;
+
+// Include the pieces autogenerated from the target description.
+#include "M68kGenDAGISel.inc"
+
+  /// getTargetMachine - Return a reference to the TargetMachine, casted
+  /// to the target-specific type.
+  const M68kTargetMachine &getTargetMachine() {
+    return static_cast<const M68kTargetMachine &>(TM);
+  }
+
+  void Select(SDNode *N) override;
+
+  // Insert instructions to initialize the global base register in the
+  // first MBB of the function.
+  // HMM... do i need this?
+  void initGlobalBaseReg(MachineFunction &MF);
+
+  bool foldOffsetIntoAddress(uint64_t Offset, M68kISelAddressMode &AM);
+
+  bool matchLoadInAddress(LoadSDNode *N, M68kISelAddressMode &AM);
+  bool matchAddress(SDValue N, M68kISelAddressMode &AM);
+  bool matchAddressBase(SDValue N, M68kISelAddressMode &AM);
+  bool matchAddressRecursively(SDValue N, M68kISelAddressMode &AM,
+                               unsigned Depth);
+  bool matchADD(SDValue &N, M68kISelAddressMode &AM, unsigned Depth);
+  bool matchWrapper(SDValue N, M68kISelAddressMode &AM);
+
+  std::pair<bool, SDNode *> selectNode(SDNode *Node);
+
+  bool SelectARI(SDNode *Parent, SDValue N, SDValue &Base);
+  bool SelectARIPI(SDNode *Parent, SDValue N, SDValue &Base);
+  bool SelectARIPD(SDNode *Parent, SDValue N, SDValue &Base);
+  bool SelectARID(SDNode *Parent, SDValue N, SDValue &Imm, SDValue &Base);
+  bool SelectARII(SDNode *Parent, SDValue N, SDValue &Imm, SDValue &Base,
+                  SDValue &Index);
+  bool SelectAL(SDNode *Parent, SDValue N, SDValue &Sym);
+  bool SelectPCD(SDNode *Parent, SDValue N, SDValue &Imm);
+  bool SelectPCI(SDNode *Parent, SDValue N, SDValue &Imm, SDValue &Index);
+
+  // If Address Mode represents Frame Index store FI in Disp and
+  // Displacement bit size in Base.
These values are read symmetrically by + // M68kRegisterInfo::eliminateFrameIndex method + inline bool getFrameIndexAddress(M68kISelAddressMode &AM, const SDLoc &DL, + SDValue &Disp, SDValue &Base) { + if (AM.BaseType == M68kISelAddressMode::FrameIndexBase) { + Disp = getI32Imm(AM.Disp, DL); + Base = CurDAG->getTargetFrameIndex( + AM.BaseFrameIndex, TLI->getPointerTy(CurDAG->getDataLayout())); + return true; + } + + return false; + } + + // Gets a symbol plus optional displacement + inline bool getSymbolicDisplacement(M68kISelAddressMode &AM, const SDLoc &DL, + SDValue &Sym) { + if (AM.GV) { + Sym = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), MVT::i32, AM.Disp, + AM.SymbolFlags); + return true; + } else if (AM.CP) { + Sym = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment, + AM.Disp, AM.SymbolFlags); + return true; + } else if (AM.ES) { + assert(!AM.Disp && "Non-zero displacement is ignored with ES."); + Sym = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); + return true; + } else if (AM.MCSym) { + assert(!AM.Disp && "Non-zero displacement is ignored with MCSym."); + assert(AM.SymbolFlags == 0 && "oo"); + Sym = CurDAG->getMCSymbol(AM.MCSym, MVT::i32); + return true; + } else if (AM.JT != -1) { + assert(!AM.Disp && "Non-zero displacement is ignored with JT."); + Sym = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); + return true; + } else if (AM.BlockAddr) { + Sym = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp, + AM.SymbolFlags); + return true; + } + + return false; + } + + /// Return a target constant with the specified value of type i8. + inline SDValue getI8Imm(int64_t Imm, const SDLoc &DL) { + return CurDAG->getTargetConstant(Imm, DL, MVT::i8); + } + + /// Return a target constant with the specified value of type i8. 
+  inline SDValue getI16Imm(int64_t Imm, const SDLoc &DL) {
+    return CurDAG->getTargetConstant(Imm, DL, MVT::i16);
+  }
+
+  /// Return a target constant with the specified value, of type i32.
+  inline SDValue getI32Imm(int64_t Imm, const SDLoc &DL) {
+    return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
+  }
+
+  /// Return a reference to the TargetInstrInfo, casted to the target-specific
+  /// type.
+  const M68kInstrInfo *getInstrInfo() const {
+    return Subtarget->getInstrInfo();
+  }
+
+  /// Return an SDNode that returns the value of the global base register.
+  /// Output instructions required to initialize the global base register,
+  /// if necessary.
+  SDNode *getGlobalBaseReg();
+};
+} // namespace
+
+bool M68kDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
+  Subtarget = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
+  return SelectionDAGISel::runOnMachineFunction(MF);
+}
+
+/// This pass converts a legalized DAG into a M68k-specific DAG,
+/// ready for instruction scheduling.
+FunctionPass *llvm::createM68kISelDag(M68kTargetMachine &TM) {
+  return new M68kDAGToDAGISel(TM);
+}
+
+static bool doesDispFitFI(M68kISelAddressMode &AM) {
+  if (!AM.isDispAddrType())
+    return false;
+  // -1 to make sure that resolved FI will fit into Disp field
+  return isIntN(AM.getDispSize() - 1, AM.Disp);
+}
+
+static bool doesDispFit(M68kISelAddressMode &AM, int64_t Val) {
+  if (!AM.isDispAddrType())
+    return false;
+  return isIntN(AM.getDispSize(), Val);
+}
+
+/// Return an SDNode that returns the value of the global base register.
+/// Output instructions required to initialize the global base register,
+/// if necessary.
+SDNode *M68kDAGToDAGISel::getGlobalBaseReg() { + unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); + auto &DL = MF->getDataLayout(); + return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode(); +} + +bool M68kDAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset, + M68kISelAddressMode &AM) { + // Cannot combine ExternalSymbol displacements with integer offsets. + if (Offset != 0 && (AM.ES || AM.MCSym)) + return false; + + int64_t Val = AM.Disp + Offset; + + if (doesDispFit(AM, Val)) { + AM.Disp = Val; + return true; + } + + return false; +} + +//===----------------------------------------------------------------------===// +// Matchers +//===----------------------------------------------------------------------===// + +/// Helper for MatchAddress. Add the specified node to the +/// specified addressing mode without any further recursion. +bool M68kDAGToDAGISel::matchAddressBase(SDValue N, M68kISelAddressMode &AM) { + // Is the base register already occupied? + if (AM.hasBase()) { + // If so, check to see if the scale index register is set. + if (!AM.hasIndexReg()) { + AM.IndexReg = N; + AM.Scale = 1; + return true; + } + + // Otherwise, we cannot select it. + return false; + } + + // Default, generate it as a register. + AM.BaseType = M68kISelAddressMode::RegBase; + AM.BaseReg = N; + return true; +} + +/// TODO Add TLS support +bool M68kDAGToDAGISel::matchLoadInAddress(LoadSDNode *N, + M68kISelAddressMode &AM) { + return false; +} + +bool M68kDAGToDAGISel::matchAddressRecursively(SDValue N, + M68kISelAddressMode &AM, + unsigned Depth) { + SDLoc DL(N); + + // Limit recursion. + if (Depth > 5) + return matchAddressBase(N, AM); + + // If this is already a %PC relative address, we can only merge immediates + // into it. Instead of handling this in every case, we handle it here. + // PC relative addressing: %PC + 16-bit displacement! 
+  if (AM.isPCRelative()) {
+    // FIXME #12 JumpTable and ExternalSymbol address currently don't like
+    // displacements. It isn't very important, but this should be fixed for
+    // consistency.
+    // if (!(AM.ES || AM.MCSym) && AM.JT != -1)
+    //   return true;
+
+    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
+      if (foldOffsetIntoAddress(Cst->getSExtValue(), AM))
+        return true;
+    return false;
+  }
+
+  switch (N.getOpcode()) {
+  default:
+    break;
+
+  case ISD::Constant: {
+    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
+    if (foldOffsetIntoAddress(Val, AM))
+      return true;
+    break;
+  }
+
+  case M68kISD::Wrapper:
+  case M68kISD::WrapperPC:
+    if (matchWrapper(N, AM))
+      return true;
+    break;
+
+  case ISD::LOAD:
+    if (matchLoadInAddress(cast<LoadSDNode>(N), AM))
+      return true;
+    break;
+
+  case ISD::OR:
+    // We want to look through a transform in InstCombine and DAGCombiner that
+    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
+    // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
+    // An 'lea' can then be used to match the shift (multiply) and add:
+    // and $1, %esi
+    // lea (%rsi, %rdi, 8), %rax
+    if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
+        matchADD(N, AM, Depth))
+      return true;
+    break;
+
+  case ISD::ADD:
+    if (matchADD(N, AM, Depth))
+      return true;
+    break;
+
+  case ISD::FrameIndex:
+    if (AM.isDispAddrType() && AM.BaseType == M68kISelAddressMode::RegBase &&
+        AM.BaseReg.getNode() == nullptr && doesDispFitFI(AM)) {
+      AM.BaseType = M68kISelAddressMode::FrameIndexBase;
+      AM.BaseFrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
+      return true;
+    }
+    break;
+  }
+
+  return matchAddressBase(N, AM);
+}
+
+/// Add the specified node to the specified addressing mode, returning true if
+/// it cannot be done. This just pattern matches for the addressing mode.
+bool M68kDAGToDAGISel::matchAddress(SDValue N, M68kISelAddressMode &AM) { + // TODO: Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has + // a smaller encoding and avoids a scaled-index. + // And make sure it is an indexed mode + + // TODO: Post-processing: Convert foo to foo(%pc), even in non-PIC mode, + // because it has a smaller encoding. + // Make sure this must be done only if PC* modes are currently being matched + return matchAddressRecursively(N, AM, 0); +} + +bool M68kDAGToDAGISel::matchADD(SDValue &N, M68kISelAddressMode &AM, + unsigned Depth) { + // Add an artificial use to this node so that we can keep track of + // it if it gets CSE'd with a different node. + HandleSDNode Handle(N); + + M68kISelAddressMode Backup = AM; + if (matchAddressRecursively(N.getOperand(0), AM, Depth + 1) && + matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth + 1)) { + return true; + } + AM = Backup; + + // Try again after commuting the operands. + if (matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth + 1) && + matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1)) { + return true; + } + AM = Backup; + + // If we couldn't fold both operands into the address at the same time, + // see if we can just put each operand into a register and fold at least + // the add. + if (!AM.hasBase() && !AM.hasIndexReg()) { + N = Handle.getValue(); + AM.BaseReg = N.getOperand(0); + AM.IndexReg = N.getOperand(1); + AM.Scale = 1; + return true; + } + + N = Handle.getValue(); + return false; +} + +/// Try to match M68kISD::Wrapper and M68kISD::WrapperPC nodes into an +/// addressing mode. These wrap things that will resolve down into a symbol +/// reference. If no match is possible, this returns true, otherwise it returns +/// false. +bool M68kDAGToDAGISel::matchWrapper(SDValue N, M68kISelAddressMode &AM) { + // If the addressing mode already has a symbol as the displacement, we can + // never match another symbol. 
+  if (AM.hasSymbolicDisplacement())
+    return false;
+
+  SDValue N0 = N.getOperand(0);
+
+  if (N.getOpcode() == M68kISD::WrapperPC) {
+
+    // If cannot match here just restore the old version
+    M68kISelAddressMode Backup = AM;
+
+    if (AM.hasBase()) {
+      return false;
+    }
+
+    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+      AM.GV = G->getGlobal();
+      AM.SymbolFlags = G->getTargetFlags();
+      if (!foldOffsetIntoAddress(G->getOffset(), AM)) {
+        AM = Backup;
+        return false;
+      }
+    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+      AM.CP = CP->getConstVal();
+      AM.Alignment = CP->getAlign();
+      AM.SymbolFlags = CP->getTargetFlags();
+      if (!foldOffsetIntoAddress(CP->getOffset(), AM)) {
+        AM = Backup;
+        return false;
+      }
+    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+      AM.ES = S->getSymbol();
+      AM.SymbolFlags = S->getTargetFlags();
+    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
+      AM.MCSym = S->getMCSymbol();
+    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
+      AM.JT = J->getIndex();
+      AM.SymbolFlags = J->getTargetFlags();
+    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+      AM.BlockAddr = BA->getBlockAddress();
+      AM.SymbolFlags = BA->getTargetFlags();
+      if (!foldOffsetIntoAddress(BA->getOffset(), AM)) {
+        AM = Backup;
+        return false;
+      }
+    } else
+      llvm_unreachable("Unhandled symbol reference node.");
+
+    AM.setBaseReg(CurDAG->getRegister(M68k::PC, MVT::i32));
+    return true;
+  }
+
+  // This wrapper requires 32bit disp/imm field for Medium CM
+  if (!AM.isDisp32()) {
+    return false;
+  }
+
+  if (N.getOpcode() == M68kISD::Wrapper) {
+    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+      AM.GV = G->getGlobal();
+      AM.Disp += G->getOffset();
+      AM.SymbolFlags = G->getTargetFlags();
+    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+      AM.CP = CP->getConstVal();
+      AM.Alignment = CP->getAlign();
+      AM.Disp += CP->getOffset();
+      AM.SymbolFlags = CP->getTargetFlags();
+    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+      AM.ES = S->getSymbol();
+      AM.SymbolFlags = S->getTargetFlags();
+    }
else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
+      AM.MCSym = S->getMCSymbol();
+    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
+      AM.JT = J->getIndex();
+      AM.SymbolFlags = J->getTargetFlags();
+    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+      AM.BlockAddr = BA->getBlockAddress();
+      AM.Disp += BA->getOffset();
+      AM.SymbolFlags = BA->getTargetFlags();
+    } else
+      llvm_unreachable("Unhandled symbol reference node.");
+    return true;
+  }
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Selectors
+//===----------------------------------------------------------------------===//
+
+void M68kDAGToDAGISel::Select(SDNode *Node) {
+  unsigned Opcode = Node->getOpcode();
+  SDLoc DL(Node);
+
+  LLVM_DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
+
+  if (Node->isMachineOpcode()) {
+    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+    Node->setNodeId(-1);
+    return; // Already selected.
+  }
+
+  switch (Opcode) {
+  default:
+    break;
+
+  case M68kISD::GLOBAL_BASE_REG:
+    ReplaceNode(Node, getGlobalBaseReg());
+    return;
+  }
+
+  SelectCode(Node);
+}
+
+bool M68kDAGToDAGISel::SelectARIPI(SDNode *Parent, SDValue N, SDValue &Base) {
+  LLVM_DEBUG(dbgs() << "Selecting ARIPI: ");
+  LLVM_DEBUG(dbgs() << "NOT IMPLEMENTED\n");
+  return false;
+}
+
+bool M68kDAGToDAGISel::SelectARIPD(SDNode *Parent, SDValue N, SDValue &Base) {
+  LLVM_DEBUG(dbgs() << "Selecting ARIPD: ");
+  LLVM_DEBUG(dbgs() << "NOT IMPLEMENTED\n");
+  return false;
+}
+
+bool M68kDAGToDAGISel::SelectARID(SDNode *Parent, SDValue N, SDValue &Disp,
+                                  SDValue &Base) {
+  LLVM_DEBUG(dbgs() << "Selecting ARID: ");
+  M68kISelAddressMode AM(M68kISelAddressMode::ARID);
+
+  if (!matchAddress(N, AM))
+    return false;
+
+  if (AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match PC relative address\n");
+    return false;
+  }
+
+  // If this is a frame index, grab it
+  if (getFrameIndexAddress(AM, SDLoc(N), Disp, Base)) {
+    LLVM_DEBUG(dbgs() <<
"SUCCESS matched FI\n"); + return true; + } + + if (AM.hasIndexReg()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index\n"); + return false; + } + + if (!AM.hasBaseReg()) { + LLVM_DEBUG(dbgs() << "REJECT: No Base reg\n"); + return false; + } + + if (getSymbolicDisplacement(AM, SDLoc(N), Disp)) { + assert(!AM.Disp && "Should not be any displacement"); + LLVM_DEBUG(dbgs() << "SUCCESS, matched Symbol\n"); + return true; + } + + // Give a chance to ARI + if (AM.Disp == 0) { + LLVM_DEBUG(dbgs() << "REJECT: No displacement\n"); + return false; + } + + Base = AM.BaseReg; + Disp = getI16Imm(AM.Disp, SDLoc(N)); + + LLVM_DEBUG(dbgs() << "SUCCESS\n"); + return true; +} + +static bool isAddressBase(const SDValue &N) { + unsigned Op = N.getOpcode(); + if (Op == M68kISD::Wrapper || Op == M68kISD::WrapperPC || + Op == M68kISD::GLOBAL_BASE_REG) { + return true; + } + + if (Op == ISD::ADD || Op == ISD::ADDC) { + for (unsigned i = 0; i < N.getNumOperands(); ++i) { + if (isAddressBase(N.getOperand(i))) { + return true; + } + } + } + + return false; +} + +bool M68kDAGToDAGISel::SelectARII(SDNode *Parent, SDValue N, SDValue &Disp, + SDValue &Base, SDValue &Index) { + M68kISelAddressMode AM(M68kISelAddressMode::ARII); + LLVM_DEBUG(dbgs() << "Selecting ARII: "); + + if (!matchAddress(N, AM)) + return false; + + if (AM.isPCRelative()) { + LLVM_DEBUG(dbgs() << "REJECT: PC relative\n"); + return false; + } + + if (!AM.hasIndexReg()) { + LLVM_DEBUG(dbgs() << "REJECT: No Index\n"); + return false; + } + + if (!AM.hasBaseReg()) { + LLVM_DEBUG(dbgs() << "REJECT: No Base\n"); + return false; + } + + if (!isAddressBase(AM.BaseReg) && isAddressBase(AM.IndexReg)) { + Base = AM.IndexReg; + Index = AM.BaseReg; + } else { + Base = AM.BaseReg; + Index = AM.IndexReg; + } + + if (AM.hasSymbolicDisplacement()) { + LLVM_DEBUG(dbgs() << "REJECT, Cannot match symbolic displacement\n"); + return false; + } + + // The idea here is that we want to use ARII without displacement only if + // necessary like 
memory operations, otherwise this must be lowered into + // addition + if (AM.Disp == 0 && (!Parent || (Parent->getOpcode() != ISD::LOAD && + Parent->getOpcode() != ISD::STORE))) { + LLVM_DEBUG(dbgs() << "REJECT: Displacement is Zero\n"); + return false; + } + + Disp = getI8Imm(AM.Disp, SDLoc(N)); + + LLVM_DEBUG(dbgs() << "SUCCESS\n"); + return true; +} + +bool M68kDAGToDAGISel::SelectAL(SDNode *Parent, SDValue N, SDValue &Sym) { + LLVM_DEBUG(dbgs() << "Selecting AL: "); + M68kISelAddressMode AM(M68kISelAddressMode::AL); + + if (!matchAddress(N, AM)) { + LLVM_DEBUG(dbgs() << "REJECT: Match failed\n"); + return false; + } + + if (AM.isPCRelative()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match PC relative address\n"); + return false; + } + + if (AM.hasBase()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match Base\n"); + return false; + } + + if (AM.hasIndexReg()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index\n"); + return false; + } + + if (getSymbolicDisplacement(AM, SDLoc(N), Sym)) { + LLVM_DEBUG(dbgs() << "SUCCESS: Matched symbol\n"); + return true; + } + + if (AM.Disp) { + Sym = getI32Imm(AM.Disp, SDLoc(N)); + LLVM_DEBUG(dbgs() << "SUCCESS\n"); + return true; + } + + LLVM_DEBUG(dbgs() << "REJECT: Not Symbol or Disp\n"); + return false; + ; +} + +bool M68kDAGToDAGISel::SelectPCD(SDNode *Parent, SDValue N, SDValue &Disp) { + LLVM_DEBUG(dbgs() << "Selecting PCD: "); + M68kISelAddressMode AM(M68kISelAddressMode::PCD); + + if (!matchAddress(N, AM)) + return false; + + if (!AM.isPCRelative()) { + LLVM_DEBUG(dbgs() << "REJECT: Not PC relative\n"); + return false; + } + + if (AM.hasIndexReg()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index\n"); + return false; + } + + if (getSymbolicDisplacement(AM, SDLoc(N), Disp)) { + LLVM_DEBUG(dbgs() << "SUCCESS, matched Symbol\n"); + return true; + } + + Disp = getI16Imm(AM.Disp, SDLoc(N)); + + LLVM_DEBUG(dbgs() << "SUCCESS\n"); + return true; +} + +bool M68kDAGToDAGISel::SelectPCI(SDNode *Parent, SDValue N, SDValue 
&Disp, + SDValue &Index) { + LLVM_DEBUG(dbgs() << "Selecting PCI: "); + M68kISelAddressMode AM(M68kISelAddressMode::PCI); + + if (!matchAddress(N, AM)) + return false; + + if (!AM.isPCRelative()) { + LLVM_DEBUG(dbgs() << "REJECT: Not PC relative\n"); + return false; + } + + if (!AM.hasIndexReg()) { + LLVM_DEBUG(dbgs() << "REJECT: No Index\n"); + return false; + } + + Index = AM.IndexReg; + + if (getSymbolicDisplacement(AM, SDLoc(N), Disp)) { + assert(!AM.Disp && "Should not be any displacement"); + LLVM_DEBUG(dbgs() << "SUCCESS, matched Symbol\n"); + return true; + } + + Disp = getI8Imm(AM.Disp, SDLoc(N)); + + LLVM_DEBUG(dbgs() << "SUCCESS\n"); + return true; +} + +bool M68kDAGToDAGISel::SelectARI(SDNode *Parent, SDValue N, SDValue &Base) { + LLVM_DEBUG(dbgs() << "Selecting ARI: "); + M68kISelAddressMode AM(M68kISelAddressMode::ARI); + + if (!matchAddress(N, AM)) { + LLVM_DEBUG(dbgs() << "REJECT: Match failed\n"); + return false; + } + + if (AM.isPCRelative()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match PC relative address\n"); + return false; + } + + // ARI does not use these + if (AM.hasIndexReg() || AM.Disp != 0) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index or Disp\n"); + return false; + } + + // Must be matched by AL + if (AM.hasSymbolicDisplacement()) { + LLVM_DEBUG(dbgs() << "REJECT: Cannot match Symbolic Disp\n"); + return false; + } + + if (AM.hasBaseReg()) { + Base = AM.BaseReg; + LLVM_DEBUG(dbgs() << "SUCCESS\n"); + return true; + } + + return false; +} Index: llvm/lib/Target/M68k/M68kISelLowering.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kISelLowering.h @@ -0,0 +1,270 @@ +//===-- M68kISelLowering.h - M68k DAG Lowering Interface ----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines the interfaces that M68k uses to lower LLVM code into a +/// selection DAG. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KISELLOWERING_H +#define LLVM_LIB_TARGET_M68K_M68KISELLOWERING_H + +#include "M68k.h" + +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/IR/Function.h" + +#include + +namespace llvm { +namespace M68kISD { + +/// M68k Specific DAG nodes +enum NodeType { + /// Start the numbering from where ISD NodeType finishes. + FIRST_NUMBER = ISD::BUILTIN_OP_END, + + CALL, + RET, + TAIL_CALL, + TC_RETURN, + + /// M68k compare and logical compare instructions. Subtracts the source + /// operand from the destination data register and sets the condition + /// codes according to the result. Immediate always goes first. + CMP, + + /// M68k bit-test instructions. + BT, + + /// M68k Select + SELECT, + + /// M68k SetCC. Operand 0 is condition code, and operand 1 is the CCR + /// operand, usually produced by a CMP instruction. + SETCC, + + // Same as SETCC except it's materialized with a subx and the value is all + // one's or all zero's. + SETCC_CARRY, // R = carry_bit ? ~0 : 0 + + /// M68k conditional moves. Operand 0 and operand 1 are the two values + /// to select from. Operand 2 is the condition code, and operand 3 is the + /// flag operand produced by a CMP or TEST instruction. It also writes a + /// flag result. + CMOV, + + /// M68k conditional branches. Operand 0 is the chain operand, operand 1 + /// is the block to branch if condition is true, operand 2 is the + /// condition code, and operand 3 is the flag operand produced by a CMP + /// or TEST instruction. 
+ BRCOND, + + // Arithmetic operations with CCR results. + ADD, + SUB, + ADDX, + SUBX, + SMUL, + UMUL, + OR, + XOR, + AND, + + // GlobalBaseReg, + GLOBAL_BASE_REG, + + /// A wrapper node for TargetConstantPool, + /// TargetExternalSymbol, and TargetGlobalAddress. + Wrapper, + + /// Special wrapper used under M68k PIC mode for PC + /// relative displacements. + WrapperPC, + + // For allocating variable amounts of stack space when using + // segmented stacks. Check if the current stacklet has enough space, and + // falls back to heap allocation if not. + SEG_ALLOCA, +}; +} // namespace M68kISD + +/// Define some predicates that are used for node matching. +namespace M68k { + +/// Determines whether the callee is required to pop its +/// own arguments. Callee pop is necessary to support tail calls. +bool isCalleePop(CallingConv::ID CallingConv, bool IsVarArg, bool GuaranteeTCO); + +} // end namespace M68k + +//===--------------------------------------------------------------------===// +// TargetLowering Implementation +//===--------------------------------------------------------------------===// + +class M68kMachineFunctionInfo; +class M68kSubtarget; + +class M68kTargetLowering : public TargetLowering { + const M68kSubtarget &Subtarget; + const M68kTargetMachine &TM; + +public: + explicit M68kTargetLowering(const M68kTargetMachine &TM, + const M68kSubtarget &STI); + + static const M68kTargetLowering *create(const M68kTargetMachine &TM, + const M68kSubtarget &STI); + + const char *getTargetNodeName(unsigned Opcode) const override; + + /// Return the value type to use for ISD::SETCC. + EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, + EVT VT) const override; + + /// EVT is not used in-tree, but is used by out-of-tree target. + virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override; + + /// Provide custom lowering hooks for some operations. 
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; + + /// Return the entry encoding for a jump table in the current function. + /// The returned value is a member of the MachineJumpTableInfo::JTEntryKind + /// enum. + unsigned getJumpTableEncoding() const override; + + const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, + const MachineBasicBlock *MBB, + unsigned uid, + MCContext &Ctx) const override; + + /// Returns relocation base for the given PIC jumptable. + SDValue getPICJumpTableRelocBase(SDValue Table, + SelectionDAG &DAG) const override; + + /// This returns the relocation base for the given PIC jumptable, + /// the same as getPICJumpTableRelocBase, but as an MCExpr. + const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF, + unsigned JTI, + MCContext &Ctx) const override; + + /// Replace the results of node with an illegal result type with new values + /// built out of custom code. + // void ReplaceNodeResults(SDNode *N, SmallVectorImpl&Results, + // SelectionDAG &DAG) const override; + + // SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; + + MachineBasicBlock * + EmitInstrWithCustomInserter(MachineInstr &MI, + MachineBasicBlock *MBB) const override; + +private: + unsigned GetAlignedArgumentStackSize(unsigned StackSize, + SelectionDAG &DAG) const; + + SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const; + + /// Emit a load of return address if tail call + /// optimization is performed and it is required. + SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, + SDValue Chain, bool IsTailCall, int FPDiff, + const SDLoc &DL) const; + + /// Emit a store of the return address if tail call + /// optimization is performed and it is required (FPDiff!=0). 
+ SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF, + SDValue Chain, SDValue RetAddrFrIdx, + EVT PtrVT, unsigned SlotSize, int FPDiff, + const SDLoc &DL) const; + + SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, + const SmallVectorImpl &ArgInfo, + const SDLoc &DL, SelectionDAG &DAG, + const CCValAssign &VA, MachineFrameInfo &MFI, + unsigned i) const; + + SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, + const SDLoc &DL, SelectionDAG &DAG, + const CCValAssign &VA, ISD::ArgFlagsTy Flags) const; + + SDValue LowerMUL(SDValue &N, SelectionDAG &DAG) const; + SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL, + SelectionDAG &DAG) const; + SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalAddress(const GlobalValue *GV, const SDLoc &DL, + int64_t Offset, SelectionDAG &DAG) const; + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerCallResult(SDValue Chain, SDValue InFlag, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl &Ins, + const SDLoc &DL, SelectionDAG &DAG, + SmallVectorImpl &InVals) const; + + /// LowerFormalArguments - transform physical registers into virtual + /// registers and generate load operations for 
arguments places on the stack. + SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CCID, + bool isVarArg, + const SmallVectorImpl &Ins, + const SDLoc &DL, SelectionDAG &DAG, + SmallVectorImpl &InVals) const override; + + SDValue LowerCall(CallLoweringInfo &CLI, + SmallVectorImpl &InVals) const override; + + /// Lower the result values of a call into the + /// appropriate copies out of appropriate physical registers. + SDValue LowerReturn(SDValue Chain, CallingConv::ID CCID, bool IsVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, const SDLoc &DL, + SelectionDAG &DAG) const override; + + MachineBasicBlock *EmitLoweredSelect(MachineInstr &I, + MachineBasicBlock *BB) const; + MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI, + MachineBasicBlock *BB) const; + + /// Emit nodes that will be selected as "test Op0,Op0", or something + /// equivalent, for use with the given M68k condition code. + SDValue EmitTest(SDValue Op0, unsigned M68kCC, const SDLoc &dl, + SelectionDAG &DAG) const; + + /// Emit nodes that will be selected as "cmp Op0,Op1", or something + /// equivalent, for use with the given M68k condition code. + SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC, const SDLoc &dl, + SelectionDAG &DAG) const; + + /// Check whether the call is eligible for tail call optimization. Targets + /// that want to do tail call optimization should implement this function. 
+ bool IsEligibleForTailCallOptimization( + SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, + bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SmallVectorImpl &Ins, SelectionDAG &DAG) const; + + SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; +}; +} // namespace llvm + +#endif // M68kISELLOWERING_H Index: llvm/lib/Target/M68k/M68kISelLowering.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -0,0 +1,3730 @@ +//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl ------*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines the interfaces that M68k uses to lower LLVM code into a +/// selection DAG. 
+/// +//===----------------------------------------------------------------------===// + +#include "M68kISelLowering.h" +#include "M68kCallingConv.h" +#include "M68kMachineFunction.h" +#include "M68kSubtarget.h" +#include "M68kTargetMachine.h" +#include "M68kTargetObjectFile.h" + +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/IR/CallingConv.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/KnownBits.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +#define DEBUG_TYPE "M68k-isel" + +STATISTIC(NumTailCalls, "Number of tail calls"); + +M68kTargetLowering::M68kTargetLowering(const M68kTargetMachine &TM, + const M68kSubtarget &STI) + : TargetLowering(TM), Subtarget(STI), TM(TM) { + + MVT PtrVT = MVT::i32; + + setBooleanContents(ZeroOrOneBooleanContent); + + auto *RegInfo = Subtarget.getRegisterInfo(); + setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister()); + + // TODO: computeRegisterInfo should able to infer this info + // ValueTypeActions.setTypeAction(MVT::i64, TypeExpandInteger); + + // NOTE The stuff that follows is true for M68000 + + // Set up the register classes. 
+ addRegisterClass(MVT::i8, &M68k::DR8RegClass); + addRegisterClass(MVT::i16, &M68k::XR16RegClass); + addRegisterClass(MVT::i32, &M68k::XR32RegClass); + + for (auto VT : MVT::integer_valuetypes()) { + setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); + } + + // We don't accept any truncstore of integer registers. + setTruncStoreAction(MVT::i64, MVT::i32, Expand); + setTruncStoreAction(MVT::i64, MVT::i16, Expand); + setTruncStoreAction(MVT::i64, MVT::i8, Expand); + setTruncStoreAction(MVT::i32, MVT::i16, Expand); + setTruncStoreAction(MVT::i32, MVT::i8, Expand); + setTruncStoreAction(MVT::i16, MVT::i8, Expand); + + setOperationAction(ISD::MUL, MVT::i8, Promote); + setOperationAction(ISD::MUL, MVT::i16, Legal); + setOperationAction(ISD::MUL, MVT::i32, Custom); + setOperationAction(ISD::MUL, MVT::i64, LibCall); + + for (auto OP : + {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::UDIVREM, ISD::SDIVREM, + ISD::MULHS, ISD::MULHU, ISD::UMUL_LOHI, ISD::SMUL_LOHI}) { + setOperationAction(OP, MVT::i8, Promote); + setOperationAction(OP, MVT::i16, Legal); + setOperationAction(OP, MVT::i32, LibCall); + } + + for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) { + setOperationAction(OP, MVT::i8, Expand); + setOperationAction(OP, MVT::i16, Expand); + } + + for (auto OP : {ISD::SMULO, ISD::UMULO}) { + setOperationAction(OP, MVT::i8, Expand); + setOperationAction( + OP, MVT::i16, + Expand); // FIXME #14 something wrong with custom lowering here + setOperationAction(OP, MVT::i32, Expand); + } + + // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences. 
+ for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { + setOperationAction(ISD::ADDC, VT, Custom); + setOperationAction(ISD::ADDE, VT, Custom); + setOperationAction(ISD::SUBC, VT, Custom); + setOperationAction(ISD::SUBE, VT, Custom); + } + + // SADDO and friends are legal with this setup, i hope + for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { + setOperationAction(ISD::SADDO, VT, Custom); + setOperationAction(ISD::UADDO, VT, Custom); + setOperationAction(ISD::SSUBO, VT, Custom); + setOperationAction(ISD::USUBO, VT, Custom); + } + + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::BRCOND, MVT::Other, Custom); + + for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { + setOperationAction(ISD::BR_CC, VT, Expand); + setOperationAction(ISD::SELECT, VT, Custom); + setOperationAction(ISD::SELECT_CC, VT, Expand); + setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction(ISD::SETCCCARRY, VT, Custom); + } + + for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { + setOperationAction(ISD::BSWAP, VT, Expand); + setOperationAction(ISD::CTTZ, VT, Expand); + setOperationAction(ISD::CTLZ, VT, Expand); + setOperationAction(ISD::CTPOP, VT, Expand); + } + + setOperationAction(ISD::ConstantPool, MVT::i32, Custom); + setOperationAction(ISD::JumpTable, MVT::i32, Custom); + setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); + setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); + setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom); + setOperationAction(ISD::BlockAddress, MVT::i32, Custom); + + setOperationAction(ISD::VASTART, MVT::Other, Custom); + setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::VAARG, MVT::Other, Expand); + setOperationAction(ISD::VACOPY, MVT::Other, Expand); + + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + + setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); + + computeRegisterProperties(STI.getRegisterInfo()); + + 
// 2^2 bytes // ??? can it be just 2^1? + setMinFunctionAlignment(Align::Constant<2>()); +} + +EVT M68kTargetLowering::getSetCCResultType(const DataLayout &DL, + LLVMContext &Context, EVT VT) const { + // M68k SETcc producess either 0x00 or 0xFF + return MVT::i8; +} + +MVT M68kTargetLowering::getScalarShiftAmountTy(const DataLayout &DL, + EVT Ty) const { + if (Ty.isSimple()) { + return Ty.getSimpleVT(); + } + return MVT::getIntegerVT(8 * DL.getPointerSize(0)); +} + +#include "M68kGenCallingConv.inc" + +enum StructReturnType { NotStructReturn, RegStructReturn, StackStructReturn }; + +static StructReturnType +callIsStructReturn(const SmallVectorImpl &Outs) { + if (Outs.empty()) + return NotStructReturn; + + const ISD::ArgFlagsTy &Flags = Outs[0].Flags; + if (!Flags.isSRet()) + return NotStructReturn; + if (Flags.isInReg()) + return RegStructReturn; + return StackStructReturn; +} + +/// Determines whether a function uses struct return semantics. +static StructReturnType +argsAreStructReturn(const SmallVectorImpl &Ins) { + if (Ins.empty()) + return NotStructReturn; + + const ISD::ArgFlagsTy &Flags = Ins[0].Flags; + if (!Flags.isSRet()) + return NotStructReturn; + if (Flags.isInReg()) + return RegStructReturn; + return StackStructReturn; +} + +/// Make a copy of an aggregate at address specified by "Src" to address +/// "Dst" with size and alignment information specified by the specific +/// parameter attribute. The copy will be passed as a byval function parameter. +static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, + SDValue Chain, ISD::ArgFlagsTy Flags, + SelectionDAG &DAG, const SDLoc &DL) { + SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32); + + return DAG.getMemcpy( + Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(), + /*isVolatile*/ false, /*AlwaysInline=*/true, + /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo()); +} + +/// Return true if the calling convention is one that we can guarantee TCO for. 
+static bool canGuaranteeTCO(CallingConv::ID CC) {
+  return false;
+  // return CC == CallingConv::Fast; // TODO #7 Since M68010 only
+}
+
+/// Return true if we might ever do TCO for calls with this calling convention.
+static bool mayTailCallThisCC(CallingConv::ID CC) {
+  switch (CC) {
+  // C calling conventions:
+  case CallingConv::C:
+    return true;
+  default:
+    return canGuaranteeTCO(CC);
+  }
+}
+
+/// Return true if the function is being made into a tailcall target by
+/// changing its ABI.
+static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
+  return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
+}
+
+/// Return true if the given stack call argument is already available in the
+/// same position (relatively) of the caller's incoming argument stack.
+static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
+                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
+                                const MachineRegisterInfo *MRI,
+                                const M68kInstrInfo *TII,
+                                const CCValAssign &VA) {
+  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+
+  for (;;) {
+    // Look through nodes that don't alter the bits of the incoming value.
+    unsigned Op = Arg.getOpcode();
+    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
+      Arg = Arg.getOperand(0);
+      continue;
+    }
+    if (Op == ISD::TRUNCATE) {
+      const SDValue &TruncInput = Arg.getOperand(0);
+      if (TruncInput.getOpcode() == ISD::AssertZext &&
+          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
+              Arg.getValueType()) {
+        Arg = TruncInput.getOperand(0);
+        continue;
+      }
+    }
+    break;
+  }
+
+  int FI = INT_MAX;
+  if (Arg.getOpcode() == ISD::CopyFromReg) {
+    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+    if (!Register::isVirtualRegister(VR))
+      return false;
+    MachineInstr *Def = MRI->getVRegDef(VR);
+    if (!Def)
+      return false;
+    if (!Flags.isByVal()) {
+      if (!TII->isLoadFromStackSlot(*Def, FI))
+        return false;
+    } else {
+      unsigned Opcode = Def->getOpcode();
+      if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
+          Def->getOperand(1).isFI()) {
+        FI = Def->getOperand(1).getIndex();
+        Bytes = Flags.getByValSize();
+      } else
+        return false;
+    }
+  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
+    if (Flags.isByVal())
+      // ByVal argument is passed in as a pointer but it's now being
+      // dereferenced. e.g.
+      // define @foo(%struct.X* %A) {
+      //   tail call @bar(%struct.X* byval %A)
+      // }
+      return false;
+    SDValue Ptr = Ld->getBasePtr();
+    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
+    if (!FINode)
+      return false;
+    FI = FINode->getIndex();
+  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
+    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
+    FI = FINode->getIndex();
+    Bytes = Flags.getByValSize();
+  } else
+    return false;
+
+  assert(FI != INT_MAX);
+  if (!MFI.isFixedObjectIndex(FI))
+    return false;
+
+  if (Offset != MFI.getObjectOffset(FI))
+    return false;
+
+  if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
+    // If the argument location is wider than the argument type, check that any
+    // extension flags match.
+    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
+        Flags.isSExt() != MFI.isObjectSExt(FI)) {
+      return false;
+    }
+  }
+
+  return Bytes == MFI.getObjectSize(FI);
+}
+
+SDValue
+M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
+  int ReturnAddrIndex = FuncInfo->getRAIndex();
+
+  if (ReturnAddrIndex == 0) {
+    // Set up a frame object for the return address.
+    unsigned SlotSize = Subtarget.getSlotSize();
+    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
+        SlotSize, -(int64_t)SlotSize, false);
+    FuncInfo->setRAIndex(ReturnAddrIndex);
+  }
+
+  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
+}
+
+SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
+                                                    SDValue &OutRetAddr,
+                                                    SDValue Chain,
+                                                    bool IsTailCall, int FPDiff,
+                                                    const SDLoc &DL) const {
+  EVT VT = getPointerTy(DAG.getDataLayout());
+  OutRetAddr = getReturnAddressFrameIndex(DAG);
+
+  // Load the "old" Return address.
+  OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
+  return SDValue(OutRetAddr.getNode(), 1);
+}
+
+SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
+    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
+    EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
+  if (!FPDiff)
+    return Chain;
+
+  // Calculate the new stack slot for the return address.
+  int NewFO = MF.getFrameInfo().CreateFixedObject(
+      SlotSize, (int64_t)FPDiff - SlotSize, false);
+
+  SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
+  // Store the return address to the appropriate stack slot.
+  Chain = DAG.getStore(
+      Chain, DL, RetFI, NewFI,
+      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFO));
+  return Chain;
+}
+
+SDValue
+M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
+                                     const SmallVectorImpl<ISD::InputArg> &Ins,
+                                     const SDLoc &DL, SelectionDAG &DAG,
+                                     const CCValAssign &VA,
+                                     MachineFrameInfo &MFI, unsigned i) const {
+  // Create the nodes corresponding to a load from this parameter slot.
+  ISD::ArgFlagsTy Flags = Ins[i].Flags;
+  EVT ValVT;
+
+  // If value is passed by pointer we have address passed instead of the value
+  // itself.
+  if (VA.getLocInfo() == CCValAssign::Indirect)
+    ValVT = VA.getLocVT();
+  else
+    ValVT = VA.getValVT();
+
+  // Because we are dealing with BE architecture we need to offset loading of
+  // partial types
+  int Offset = VA.getLocMemOffset();
+  if (VA.getValVT() == MVT::i8) {
+    Offset += 3;
+  } else if (VA.getValVT() == MVT::i16) {
+    Offset += 2;
+  }
+
+  // Calculate SP offset of interrupt parameter, re-arrange the slot normally
+  // taken by a return address.
+  // TODO #10 interrupts
+  // if (CallConv == CallingConv::M68k_INTR) {
+  //   const M68kSubtarget& Subtarget =
+  //       static_cast<const M68kSubtarget&>(DAG.getSubtarget());
+  //   // M68k interrupts may take one or two arguments.
+  //   // On the stack there will be no return address as in regular call.
+  //   // Offset of last argument need to be set to -4/-8 bytes.
+  //   // Where offset of the first argument out of two, should be set to 0
+  //   // bytes.
+  //   Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
+  // }
+
+  // FIXME #15 For now, all byval parameter objects are marked mutable. This can
+  // be changed with more analysis. In case of tail call optimization mark all
+  // arguments mutable. Since they could be overwritten by lowering of arguments
+  // in case of a tail call.
+ bool AlwaysUseMutable = shouldGuaranteeTCO( + CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt); + bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); + + if (Flags.isByVal()) { + unsigned Bytes = Flags.getByValSize(); + if (Bytes == 0) + Bytes = 1; // Don't create zero-sized stack objects. + int FI = MFI.CreateFixedObject(Bytes, Offset, isImmutable); + // Adjust SP offset of interrupt parameter. + // TODO #10 interrupts + // if (CallConv == CallingConv::M68k_INTR) { + // MFI.setObjectOffset(FI, Offset); + // } + return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + } else { + int FI = + MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, isImmutable); + + // Set SExt or ZExt flag. + if (VA.getLocInfo() == CCValAssign::ZExt) { + MFI.setObjectZExt(FI, true); + } else if (VA.getLocInfo() == CCValAssign::SExt) { + MFI.setObjectSExt(FI, true); + } + + // Adjust SP offset of interrupt parameter. + // TODO #10 interrupts + // if (CallConv == CallingConv::M68k_INTR) { + // MFI.setObjectOffset(FI, Offset); + // } + + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + SDValue Val = DAG.getLoad( + ValVT, DL, Chain, FIN, + MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); + return VA.isExtInLoc() ? 
DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val) + : Val; + } +} + +SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, + SDValue Arg, const SDLoc &DL, + SelectionDAG &DAG, + const CCValAssign &VA, + ISD::ArgFlagsTy Flags) const { + unsigned LocMemOffset = VA.getLocMemOffset(); + SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL); + PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), + StackPtr, PtrOff); + if (Flags.isByVal()) + return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL); + + return DAG.getStore( + Chain, DL, Arg, PtrOff, + MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); +} + +//===----------------------------------------------------------------------===// +// Call +//===----------------------------------------------------------------------===// + +SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl &InVals) const { + SelectionDAG &DAG = CLI.DAG; + SDLoc &DL = CLI.DL; + SmallVectorImpl &Outs = CLI.Outs; + SmallVectorImpl &OutVals = CLI.OutVals; + SmallVectorImpl &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + CallingConv::ID CallConv = CLI.CallConv; + bool &isTailCall = CLI.IsTailCall; + bool isVarArg = CLI.IsVarArg; + + MachineFunction &MF = DAG.getMachineFunction(); + StructReturnType SR = callIsStructReturn(Outs); + bool IsSibcall = false; + M68kMachineFunctionInfo *MFI = MF.getInfo(); + // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo(); + + // TODO #10 interrupts + // if (CallConv == CallingConv::M68k_INTR) + // report_fatal_error("M68k interrupts may not be called directly"); + + auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls"); + if (Attr.getValueAsString() == "true") + isTailCall = false; + + // FIXME #7 Add tailcalls support + // if (Subtarget.isPICStyleGOT() && + // !MF.getTarget().Options.GuaranteedTailCallOpt) { + // // If we are using a GOT, disable tail 
calls to external symbols with + // // default visibility. Tail calling such a symbol requires using a GOT + // // relocation, which forces early binding of the symbol. This breaks code + // // that require lazy function symbol resolution. Using musttail or + // // GuaranteedTailCallOpt will override this. + // GlobalAddressSDNode *G = dyn_cast(Callee); + // if (!G || (!G->getGlobal()->hasLocalLinkage() && + // G->getGlobal()->hasDefaultVisibility())) + // isTailCall = false; + // } + + bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall(); + if (IsMustTail) { + // Force this to be a tail call. The verifier rules are enough to ensure + // that we can lower this successfully without moving the return address + // around. + isTailCall = true; + } else if (isTailCall) { + // Check if it's really possible to do a tail call. + isTailCall = IsEligibleForTailCallOptimization( + Callee, CallConv, isVarArg, SR != NotStructReturn, + MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins, + DAG); + + // Sibcalls are automatically detected tailcalls which do not require + // ABI changes. + if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) + IsSibcall = true; + + if (isTailCall) + ++NumTailCalls; + } + + assert(!(isVarArg && canGuaranteeTCO(CallConv)) && + "Var args not supported with calling convention fastcc"); + + // Analyze operands of the call, assigning locations to each operand. + SmallVector ArgLocs; + // It is empty for LibCall + const Function *CalleeFunc = CLI.CB ? CLI.CB->getCalledFunction() : nullptr; + M68kCCState CCInfo(*CalleeFunc, CallConv, isVarArg, MF, ArgLocs, + *DAG.getContext()); + CCInfo.AnalyzeCallOperands(Outs, CC_M68k); + + // Get a count of how many bytes are to be pushed on the stack. + unsigned NumBytes = CCInfo.getAlignedCallFrameSize(); + if (IsSibcall) { + // This is a sibcall. The memory operands are available in caller's + // own caller's stack. 
+ NumBytes = 0; + } else if (MF.getTarget().Options.GuaranteedTailCallOpt && + canGuaranteeTCO(CallConv)) { + NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); + } + + // TODO #44 debug this: + int FPDiff = 0; + if (isTailCall && !IsSibcall && !IsMustTail) { + // Lower arguments at fp - stackoffset + fpdiff. + unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn(); + + FPDiff = NumBytesCallerPushed - NumBytes; + + // Set the delta of movement of the returnaddr stackslot. + // But only set if delta is greater than previous delta. + if (FPDiff < MFI->getTCReturnAddrDelta()) + MFI->setTCReturnAddrDelta(FPDiff); + } + + unsigned NumBytesToPush = NumBytes; + unsigned NumBytesToPop = NumBytes; + + // If we have an inalloca argument, all stack space has already been allocated + // for us and be right at the top of the stack. We don't support multiple + // arguments passed in memory when using inalloca. + if (!Outs.empty() && Outs.back().Flags.isInAlloca()) { + NumBytesToPush = 0; + if (!ArgLocs.back().isMemLoc()) + report_fatal_error("cannot use inalloca attribute on a register " + "parameter"); + if (ArgLocs.back().getLocMemOffset() != 0) + report_fatal_error("any parameter with the inalloca attribute must be " + "the only memory argument"); + } + + if (!IsSibcall) + Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush, + NumBytes - NumBytesToPush, DL); + + SDValue RetFI; + // Load return address for tail calls. + if (isTailCall && FPDiff) + Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, isTailCall, FPDiff, DL); + + SmallVector, 8> RegsToPass; + SmallVector MemOpChains; + SDValue StackPtr; + + // Walk the register/memloc assignments, inserting copies/loads. In the case + // of tail call optimization arguments are handle later. + const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + ISD::ArgFlagsTy Flags = Outs[i].Flags; + + // Skip inalloca arguments, they have already been written. 
+ if (Flags.isInAlloca()) + continue; + + CCValAssign &VA = ArgLocs[i]; + EVT RegVT = VA.getLocVT(); + SDValue Arg = OutVals[i]; + bool isByVal = Flags.isByVal(); + + // Promote the value if needed. + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg); + break; + case CCValAssign::BCvt: + Arg = DAG.getBitcast(RegVT, Arg); + break; + case CCValAssign::Indirect: { + // Store the argument. + SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); + int FI = cast(SpillSlot)->getIndex(); + Chain = DAG.getStore( + Chain, DL, Arg, SpillSlot, + MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); + Arg = SpillSlot; + break; + } + } + + if (VA.isRegLoc()) { + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + } else if (!IsSibcall && (!isTailCall || isByVal)) { + assert(VA.isMemLoc()); + if (!StackPtr.getNode()) { + StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(), + getPointerTy(DAG.getDataLayout())); + } + MemOpChains.push_back( + LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags)); + } + } + + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); + + // FIXME #16 Fix PIC style GOT + // ??? The only time GOT is really needed is for Medium-PIC static data + // ??? otherwise we are happy with pc-rel or static references + // if (Subtarget.isPICStyleGOT()) { + // // ELF / PIC requires GOT in the %BP register before function calls via + // PLT + // // GOT pointer. 
+ // if (!isTailCall) { + // RegsToPass.push_back(std::make_pair( + // unsigned(TRI->getBaseRegister()), + // DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), + // getPointerTy(DAG.getDataLayout())))); + // } else { + // // ??? WUT, debug this + // // If we are tail calling and generating PIC/GOT style code load the + // // address of the callee into %A1. The value in %A1 is used as target + // of + // // the tail jump. This is done to circumvent the %BP/callee-saved + // problem + // // for tail calls on PIC/GOT architectures. Normally we would just put + // the + // // address of GOT into %BP and then call target@PLT. But for tail calls + // // %BP would be restored (since %BP is callee saved) before jumping to + // the + // // target@PLT. + // + // // NOTE: The actual moving to %A1 is done further down. + // GlobalAddressSDNode *G = dyn_cast(Callee); + // if (G && !G->getGlobal()->hasLocalLinkage() && + // G->getGlobal()->hasDefaultVisibility()) + // Callee = LowerGlobalAddress(Callee, DAG); + // else if (isa(Callee)) + // Callee = LowerExternalSymbol(Callee, DAG); + // } + // } + + if (isVarArg && IsMustTail) { + const auto &Forwards = MFI->getForwardedMustTailRegParms(); + for (const auto &F : Forwards) { + SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT); + RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val)); + } + } + + // For tail calls lower the arguments to the 'real' stack slots. Sibcalls + // don't need this because the eligibility check rejects calls that require + // shuffling arguments passed in memory. + if (!IsSibcall && isTailCall) { + // Force all the incoming stack arguments to be loaded from the stack + // before any new outgoing arguments are stored to the stack, because the + // outgoing stack slots may alias the incoming argument stack slots, and + // the alias isn't otherwise explicit. 
This is slightly more conservative + // than necessary, because it means that each store effectively depends + // on every argument instead of just those arguments it would clobber. + SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); + + SmallVector MemOpChains2; + SDValue FIN; + int FI = 0; + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + if (VA.isRegLoc()) + continue; + assert(VA.isMemLoc()); + SDValue Arg = OutVals[i]; + ISD::ArgFlagsTy Flags = Outs[i].Flags; + // Skip inalloca arguments. They don't require any work. + if (Flags.isInAlloca()) + continue; + // Create frame index. + int32_t Offset = VA.getLocMemOffset() + FPDiff; + uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8; + FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); + FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + + if (Flags.isByVal()) { + // Copy relative to framepointer. + SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), DL); + if (!StackPtr.getNode()) { + StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(), + getPointerTy(DAG.getDataLayout())); + } + Source = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), + StackPtr, Source); + + MemOpChains2.push_back( + CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL)); + } else { + // Store relative to framepointer. + MemOpChains2.push_back(DAG.getStore( + ArgChain, DL, Arg, FIN, + MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); + } + } + + if (!MemOpChains2.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2); + + // Store the return address to the appropriate stack slot. + Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI, + getPointerTy(DAG.getDataLayout()), + Subtarget.getSlotSize(), FPDiff, DL); + } + + // Build a sequence of copy-to-reg nodes chained together with token chain + // and flag operands which copy the outgoing args into registers. 
+ SDValue InFlag; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first, + RegsToPass[i].second, InFlag); + InFlag = Chain.getValue(1); + } + + if (Callee->getOpcode() == ISD::GlobalAddress) { + // If the callee is a GlobalAddress node (quite common, every direct call + // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack + // it. + GlobalAddressSDNode *G = cast(Callee); + + // We should use extra load for direct calls to dllimported functions in + // non-JIT mode. + const GlobalValue *GV = G->getGlobal(); + if (!GV->hasDLLImportStorageClass()) { + unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV); + + Callee = DAG.getTargetGlobalAddress( + GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags); + + if (OpFlags == M68kII::MO_GOTPCREL) { + + // Add a wrapper. + Callee = DAG.getNode(M68kISD::WrapperPC, DL, + getPointerTy(DAG.getDataLayout()), Callee); + + // Add extra indirection + Callee = DAG.getLoad( + getPointerTy(DAG.getDataLayout()), DL, DAG.getEntryNode(), Callee, + MachinePointerInfo::getGOT(DAG.getMachineFunction())); + } + } + } else if (ExternalSymbolSDNode *S = dyn_cast(Callee)) { + const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); + unsigned char OpFlags = + Subtarget.classifyGlobalFunctionReference(nullptr, *Mod); + + Callee = DAG.getTargetExternalSymbol( + S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags); + } + + // Returns a chain & a flag for retval copy to use. 
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + SmallVector Ops; + + if (!IsSibcall && isTailCall) { + Chain = DAG.getCALLSEQ_END(Chain, + DAG.getIntPtrConstant(NumBytesToPop, DL, true), + DAG.getIntPtrConstant(0, DL, true), InFlag, DL); + InFlag = Chain.getValue(1); + } + + Ops.push_back(Chain); + Ops.push_back(Callee); + + if (isTailCall) + Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32)); + + // Add argument registers to the end of the list so that they are known live + // into the call. + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(RegsToPass[i].first, + RegsToPass[i].second.getValueType())); + + // Add a register mask operand representing the call-preserved registers. + const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); + + Ops.push_back(DAG.getRegisterMask(Mask)); + + if (InFlag.getNode()) + Ops.push_back(InFlag); + + if (isTailCall) { + MF.getFrameInfo().setHasTailCall(); + return DAG.getNode(M68kISD::TC_RETURN, DL, NodeTys, Ops); + } + + Chain = DAG.getNode(M68kISD::CALL, DL, NodeTys, Ops); + InFlag = Chain.getValue(1); + + // Create the CALLSEQ_END node. + unsigned NumBytesForCalleeToPop; + if (M68k::isCalleePop(CallConv, isVarArg, + DAG.getTarget().Options.GuaranteedTailCallOpt)) { + NumBytesForCalleeToPop = NumBytes; // Callee pops everything + } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) { + // If this is a call to a struct-return function, the callee + // pops the hidden struct pointer, so we have to push it back. + NumBytesForCalleeToPop = 4; + } else { + NumBytesForCalleeToPop = 0; // Callee pops nothing. + } + + if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) { + // No need to reset the stack after the call if the call doesn't return. To + // make the MI verify, we'll pretend the callee does it for us. 
+ NumBytesForCalleeToPop = NumBytes; + } + + // Returns a flag for retval copy to use. + if (!IsSibcall) { + Chain = DAG.getCALLSEQ_END( + Chain, DAG.getIntPtrConstant(NumBytesToPop, DL, true), + DAG.getIntPtrConstant(NumBytesForCalleeToPop, DL, true), InFlag, DL); + InFlag = Chain.getValue(1); + } + + // Handle result values, copying them out of physregs into vregs that we + // return. + return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG, + InVals); +} + +SDValue M68kTargetLowering::LowerCallResult( + SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl &Ins, const SDLoc &DL, + SelectionDAG &DAG, SmallVectorImpl &InVals) const { + + // Assign locations to each value returned by this call. + SmallVector RVLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + CCInfo.AnalyzeCallResult(Ins, RetCC_M68k); + + // Copy all of the result registers out of their specified physreg. + for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { + CCValAssign &VA = RVLocs[i]; + EVT CopyVT = VA.getLocVT(); + + /// ??? is this correct? 
+ Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InFlag) + .getValue(1); + SDValue Val = Chain.getValue(0); + + if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1) + Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); + + InFlag = Chain.getValue(2); + InVals.push_back(Val); + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Formal Arguments Calling Convention Implementation +//===----------------------------------------------------------------------===// + +SDValue M68kTargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CCID, bool isVarArg, + const SmallVectorImpl &Ins, const SDLoc &DL, + SelectionDAG &DAG, SmallVectorImpl &InVals) const { + MachineFunction &MF = DAG.getMachineFunction(); + M68kMachineFunctionInfo *MMFI = MF.getInfo(); + // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering(); + + MachineFrameInfo &MFI = MF.getFrameInfo(); + + // TODO #10 interrupts... + // if (CCID == CallingConv::M68k_INTR) { + // bool isLegal = Ins.size() == 1 || + // (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) || + // (!Is64Bit && Ins[1].VT == + // MVT::i32))); + // if (!isLegal) + // report_fatal_error("M68k interrupts may take one or two arguments"); + // } + + // Assign locations to all of the incoming arguments. 
+ SmallVector ArgLocs; + M68kCCState CCInfo(MF.getFunction(), CCID, isVarArg, MF, ArgLocs, + *DAG.getContext()); + + CCInfo.AnalyzeFormalArguments(Ins, CC_M68k); + + unsigned LastVal = ~0U; + SDValue ArgValue; + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + assert(VA.getValNo() != LastVal && "Same value in different locations"); + + LastVal = VA.getValNo(); + + if (VA.isRegLoc()) { + EVT RegVT = VA.getLocVT(); + const TargetRegisterClass *RC; + if (RegVT == MVT::i32) + RC = &M68k::XR32RegClass; + else + llvm_unreachable("Unknown argument type!"); + + unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); + ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT); + + // If this is an 8 or 16-bit value, it is really passed promoted to 32 + // bits. Insert an assert[sz]ext to capture this, then truncate to the + // right size. + if (VA.getLocInfo() == CCValAssign::SExt) { + ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + } else if (VA.getLocInfo() == CCValAssign::ZExt) { + ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue, + DAG.getValueType(VA.getValVT())); + } else if (VA.getLocInfo() == CCValAssign::BCvt) { + ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue); + } + + if (VA.isExtInLoc()) { + ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue); + } + } else { + assert(VA.isMemLoc()); + ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i); + } + + // If value is passed via pointer - do a load. + // TODO #45 debug how this really works + // ??? May I remove this indirect shizzle? + if (VA.getLocInfo() == CCValAssign::Indirect) + ArgValue = + DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo()); + + InVals.push_back(ArgValue); + } + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + // Swift calling convention does not require we copy the sret argument + // into %D0 for the return. We don't set SRetReturnReg for Swift. 
+ if (CCID == CallingConv::Swift) + continue; + + // ABI require that for returning structs by value we copy the sret argument + // into %D0 for the return. Save the argument into a virtual register so + // that we can access it from the return points. + if (Ins[i].Flags.isSRet()) { + unsigned Reg = MMFI->getSRetReturnReg(); + if (!Reg) { + MVT PtrTy = getPointerTy(DAG.getDataLayout()); + Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); + MMFI->setSRetReturnReg(Reg); + } + SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]); + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain); + break; + } + } + + unsigned StackSize = CCInfo.getNextStackOffset(); + // Align stack specially for tail calls. + if (shouldGuaranteeTCO(CCID, MF.getTarget().Options.GuaranteedTailCallOpt)) + StackSize = GetAlignedArgumentStackSize(StackSize, DAG); + + // If the function takes variable number of arguments, make a frame index for + // the start of the first vararg value... for expansion of llvm.va_start. We + // can skip this if there are no va_start calls. + if (MFI.hasVAStart()) { + MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true)); + } + + if (isVarArg && MFI.hasMustTailInVarArgFunc()) { + // We forward some GPRs and some vector types. + SmallVector RegParmTypes; + MVT IntVT = MVT::i32; + RegParmTypes.push_back(IntVT); + + // Compute the set of forwarded registers. The rest are scratch. + // ??? what is this for? + SmallVectorImpl &Forwards = + MMFI->getForwardedMustTailRegParms(); + CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k); + + // Copy all forwards from physical to virtual registers. + for (ForwardedRegister &F : Forwards) { + // FIXME #7 Can we use a less constrained schedule? 
+ SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT); + F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT)); + Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal); + } + } + + // Some CCs need callee pop. + if (M68k::isCalleePop(CCID, isVarArg, + MF.getTarget().Options.GuaranteedTailCallOpt)) { + MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything. + // } else if (CCID == CallingConv::M68k_INTR && Ins.size() == 2) { + // // M68k interrupts must pop the error code if present + // MMFI->setBytesToPopOnReturn(4); + } else { + MMFI->setBytesToPopOnReturn(0); // Callee pops nothing. + // If this is an sret function, the return should pop the hidden pointer. + if (!canGuaranteeTCO(CCID) && argsAreStructReturn(Ins) == StackStructReturn) + MMFI->setBytesToPopOnReturn(4); + } + + // if (!Is64Bit) { + // // RegSaveFrameIndex is M68k-64 only. + // MMFI->setRegSaveFrameIndex(0xAAAAAAA); + // if (CCID == CallingConv::M68k_FastCall || + // CCID == CallingConv::M68k_ThisCall) + // // fastcc functions can't have varargs. 
+ // MMFI->setVarArgsFrameIndex(0xAAAAAAA); + // } + + MMFI->setArgumentStackSize(StackSize); + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Return Value Calling Convention Implementation +//===----------------------------------------------------------------------===// + +SDValue +M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID, + bool isVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SDLoc &DL, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + M68kMachineFunctionInfo *MFI = MF.getInfo(); + + SmallVector RVLocs; + CCState CCInfo(CCID, isVarArg, MF, RVLocs, *DAG.getContext()); + CCInfo.AnalyzeReturn(Outs, RetCC_M68k); + + SDValue Flag; + SmallVector RetOps; + // Operand #0 = Chain (updated below) + RetOps.push_back(Chain); + // Operand #1 = Bytes To Pop + RetOps.push_back( + DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32)); + + // Copy the result values into the output registers. + for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + SDValue ValToCopy = OutVals[i]; + EVT ValVT = ValToCopy.getValueType(); + + // Promote values to the appropriate types. 
+ if (VA.getLocInfo() == CCValAssign::SExt) + ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy); + else if (VA.getLocInfo() == CCValAssign::ZExt) + ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy); + else if (VA.getLocInfo() == CCValAssign::AExt) { + if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) + ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy); + else + ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy); + } else if (VA.getLocInfo() == CCValAssign::BCvt) + ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy); + + Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Flag); + Flag = Chain.getValue(1); + RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); + } + + // Swift calling convention does not require we copy the sret argument + // into %d0 for the return, and SRetReturnReg is not set for Swift. + + // ABI require that for returning structs by value we copy the sret argument + // into %D0 for the return. Save the argument into a virtual register so that + // we can access it from the return points. + // + // Checking Function.hasStructRetAttr() here is insufficient because the IR + // may not have an explicit sret argument. If MFI.CanLowerReturn is + // false, then an sret argument may be implicitly inserted in the SelDAG. In + // either case MFI->setSRetReturnReg() will have been called. + if (unsigned SRetReg = MFI->getSRetReturnReg()) { + // ??? Can i just move this to the top and escape this explanation? + // When we have both sret and another return value, we should use the + // original Chain stored in RetOps[0], instead of the current Chain updated + // in the above loop. If we only have sret, RetOps[0] equals to Chain. 
+ + // For the case of sret and another return value, we have + // Chain_0 at the function entry + // Chain_1 = getCopyToReg(Chain_0) in the above loop + // If we use Chain_1 in getCopyFromReg, we will have + // Val = getCopyFromReg(Chain_1) + // Chain_2 = getCopyToReg(Chain_1, Val) from below + + // getCopyToReg(Chain_0) will be glued together with + // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be + // in Unit B, and we will have cyclic dependency between Unit A and Unit B: + // Data dependency from Unit B to Unit A due to usage of Val in + // getCopyToReg(Chain_1, Val) + // Chain dependency from Unit A to Unit B + + // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg. + SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg, + getPointerTy(MF.getDataLayout())); + + // ??? How will this work if CC does not use registers for args passing? + // ??? What if I return multiple structs? + unsigned RetValReg = M68k::D0; + Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag); + Flag = Chain.getValue(1); + + RetOps.push_back( + DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout()))); + } + + // ??? What is it doing? + // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo(); + // const MCPhysReg *I = + // TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); + // if (I) { + // for (; *I; ++I) { + // if (M68k::GR64RegClass.contains(*I)) + // RetOps.push_back(DAG.getRegister(*I, MVT::i64)); + // else + // llvm_unreachable("Unexpected register class in CSRsViaCopy!"); + // } + // } + + RetOps[0] = Chain; // Update chain. + + // Add the flag if we have it. 
+ if (Flag.getNode()) + RetOps.push_back(Flag); + + return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps); +} + +//===----------------------------------------------------------------------===// +// Fast Calling Convention (tail call) implementation +//===----------------------------------------------------------------------===// + +// Like std call, callee cleans arguments, convention except that ECX is +// reserved for storing the tail called function address. Only 2 registers are +// free for argument passing (inreg). Tail call optimization is performed +// provided: +// * tailcallopt is enabled +// * caller/callee are fastcc +// On M68k_64 architecture with GOT-style position independent code only +// local (within module) calls are supported at the moment. To keep the stack +// aligned according to platform abi the function GetAlignedArgumentStackSize +// ensures that argument delta is always multiples of stack alignment. (Dynamic +// linkers need this - darwin's dyld for example) If a tail called function +// callee has more arguments than the caller the caller needs to make sure that +// there is room to move the RETADDR to. This is achieved by reserving an area +// the size of the argument delta right after the original RETADDR, but before +// the saved framepointer or the spilled registers e.g. caller(arg1, arg2) +// calls callee(arg1, arg2,arg3,arg4) stack layout: +// arg1 +// arg2 +// RETADDR +// [ new RETADDR +// move area ] +// (possible EBP) +// ESI +// EDI +// local1 .. + +/// Make the stack size align e.g 16n + 12 aligned for a 16-byte align +/// requirement. 
+unsigned +M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, + SelectionDAG &DAG) const { + const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); + unsigned StackAlignment = TFI.getStackAlignment(); + uint64_t AlignMask = StackAlignment - 1; + int64_t Offset = StackSize; + unsigned SlotSize = Subtarget.getSlotSize(); + if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) { + // Number smaller than 12 so just add the difference. + Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); + } else { + // Mask out lower bits, add stackalignment once plus the 12 bytes. + Offset = + ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize); + } + return Offset; +} + +/// Check whether the call is eligible for tail call optimization. Targets +/// that want to do tail call optimization should implement this function. +bool M68kTargetLowering::IsEligibleForTailCallOptimization( + SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, + bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SmallVectorImpl &Ins, SelectionDAG &DAG) const { + if (!mayTailCallThisCC(CalleeCC)) + return false; + + // If -tailcallopt is specified, make fastcc functions tail-callable. + MachineFunction &MF = DAG.getMachineFunction(); + const auto &CallerF = MF.getFunction(); + + CallingConv::ID CallerCC = CallerF.getCallingConv(); + bool CCMatch = CallerCC == CalleeCC; + + if (DAG.getTarget().Options.GuaranteedTailCallOpt) { + if (canGuaranteeTCO(CalleeCC) && CCMatch) + return true; + return false; + } + + // Look for obvious safe cases to perform tail call optimization that do not + // require ABI changes. This is what gcc calls sibcall. + + // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to + // emit a special epilogue. 
+ const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); + if (RegInfo->needsStackRealignment(MF)) + return false; + + // Also avoid sibcall optimization if either caller or callee uses struct + // return semantics. + if (isCalleeStructRet || isCallerStructRet) + return false; + + // Do not sibcall optimize vararg calls unless all arguments are passed via + // registers. + LLVMContext &C = *DAG.getContext(); + if (isVarArg && !Outs.empty()) { + + SmallVector ArgLocs; + CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); + + CCInfo.AnalyzeCallOperands(Outs, CC_M68k); + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) + if (!ArgLocs[i].isRegLoc()) + return false; + } + + // Check that the call results are passed in the same way. + if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k, + RetCC_M68k)) + return false; + + // The callee has to preserve all registers the caller needs to preserve. + const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo(); + const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); + if (!CCMatch) { + const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); + if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) + return false; + } + + unsigned StackArgsSize = 0; + + // If the callee takes no arguments then go on to check the results of the + // call. + if (!Outs.empty()) { + // Check if stack adjustment is needed. For now, do not do this if any + // argument is passed on the stack. + SmallVector ArgLocs; + CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); + + CCInfo.AnalyzeCallOperands(Outs, CC_M68k); + StackArgsSize = CCInfo.getNextStackOffset(); + + if (CCInfo.getNextStackOffset()) { + // Check if the arguments are already laid out in the right way as + // the caller's fixed stack objects. 
+ MachineFrameInfo &MFI = MF.getFrameInfo(); + const MachineRegisterInfo *MRI = &MF.getRegInfo(); + const M68kInstrInfo *TII = Subtarget.getInstrInfo(); + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + SDValue Arg = OutVals[i]; + ISD::ArgFlagsTy Flags = Outs[i].Flags; + if (VA.getLocInfo() == CCValAssign::Indirect) + return false; + if (!VA.isRegLoc()) { + if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI, + TII, VA)) + return false; + } + } + } + + bool PositionIndependent = isPositionIndependent(); + // If the tailcall address may be in a register, then make sure it's + // possible to register allocate for it. The call address can + // only target %A0 or %A1 since the tail call must be scheduled after + // callee-saved registers are restored. These happen to be the same + // registers used to pass 'inreg' arguments so watch out for those. + if ((!isa(Callee) && + !isa(Callee)) || + PositionIndependent) { + unsigned NumInRegs = 0; + // In PIC we need an extra register to formulate the address computation + // for the callee. + unsigned MaxInRegs = PositionIndependent ? 1 : 2; + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + if (!VA.isRegLoc()) + continue; + unsigned Reg = VA.getLocReg(); + switch (Reg) { + default: + break; + case M68k::A0: + case M68k::A1: + if (++NumInRegs == MaxInRegs) + return false; + break; + } + } + } + + const MachineRegisterInfo &MRI = MF.getRegInfo(); + if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) + return false; + } + + bool CalleeWillPop = M68k::isCalleePop( + CalleeCC, isVarArg, MF.getTarget().Options.GuaranteedTailCallOpt); + + if (unsigned BytesToPop = + MF.getInfo()->getBytesToPopOnReturn()) { + // If we have bytes to pop, the callee must pop them. 
+ bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize; + if (!CalleePopMatches) + return false; + } else if (CalleeWillPop && StackArgsSize > 0) { + // If we don't have bytes to pop, make sure the callee doesn't pop any. + return false; + } + + return true; +} + +//===----------------------------------------------------------------------===// +// Custom Lower +//===----------------------------------------------------------------------===// + +SDValue M68kTargetLowering::LowerOperation(SDValue Op, + SelectionDAG &DAG) const { + switch (Op.getOpcode()) { + default: + llvm_unreachable("Should not custom lower this!"); + case ISD::MUL: + return LowerMUL(Op, DAG); + case ISD::SADDO: + case ISD::UADDO: + case ISD::SSUBO: + case ISD::USUBO: + case ISD::SMULO: + case ISD::UMULO: + return LowerXALUO(Op, DAG); + case ISD::SETCC: + return LowerSETCC(Op, DAG); + case ISD::SETCCCARRY: + return LowerSETCCCARRY(Op, DAG); + case ISD::SELECT: + return LowerSELECT(Op, DAG); + case ISD::BRCOND: + return LowerBRCOND(Op, DAG); + case ISD::ADDC: + case ISD::ADDE: + case ISD::SUBC: + case ISD::SUBE: + return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); + case ISD::ConstantPool: + return LowerConstantPool(Op, DAG); + case ISD::GlobalAddress: + return LowerGlobalAddress(Op, DAG); + case ISD::ExternalSymbol: + return LowerExternalSymbol(Op, DAG); + case ISD::BlockAddress: + return LowerBlockAddress(Op, DAG); + case ISD::JumpTable: + return LowerJumpTable(Op, DAG); + case ISD::VASTART: + return LowerVASTART(Op, DAG); + case ISD::DYNAMIC_STACKALLOC: + return LowerDYNAMIC_STACKALLOC(Op, DAG); + } +} + +SDValue M68kTargetLowering::LowerMUL(SDValue &N, SelectionDAG &DAG) const { + EVT VT = N->getValueType(0); + SDLoc DL(N); + + ConstantSDNode *C = dyn_cast(N->getOperand(1)); + + if (C && isPowerOf2_64(C->getZExtValue())) { + uint64_t MulAmt = C->getZExtValue(); + + if (isPowerOf2_64(MulAmt)) + return DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), + DAG.getConstant(Log2_64(MulAmt), DL, 
MVT::i8)); + + if (isPowerOf2_64(MulAmt - 1)) { + // (mul x, 2^N + 1) => (add (shl x, N), x) + return DAG.getNode( + ISD::ADD, DL, VT, N->getOperand(0), + DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), + DAG.getConstant(Log2_64(MulAmt - 1), DL, MVT::i8))); + } + + if (isPowerOf2_64(MulAmt + 1)) { + // (mul x, 2^N - 1) => (sub (shl x, N), x) + return DAG.getNode( + ISD::SUB, DL, VT, + DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), + DAG.getConstant(Log2_64(MulAmt + 1), DL, MVT::i8)), + N->getOperand(0)); + } + } + + // These cannot be handle by M68000 and M68010 + if (!Subtarget.atLeastM68020()) { + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + MakeLibCallOptions LCO; + LCO.setSExt(); + if (VT == MVT::i32) { + SDValue Args[] = {LHS, RHS}; + return makeLibCall(DAG, RTLIB::MUL_I32, VT, Args, LCO, DL).first; + } else if (VT == MVT::i64) { + unsigned LoSize = VT.getSizeInBits(); + SDValue HiLHS = DAG.getNode( + ISD::SRA, DL, VT, LHS, + DAG.getConstant(LoSize - 1, DL, getPointerTy(DAG.getDataLayout()))); + SDValue HiRHS = DAG.getNode( + ISD::SRA, DL, VT, RHS, + DAG.getConstant(LoSize - 1, DL, getPointerTy(DAG.getDataLayout()))); + SDValue Args[] = {HiLHS, LHS, HiRHS, RHS}; + SDValue Ret = makeLibCall(DAG, RTLIB::MUL_I64, VT, Args, LCO, DL).first; + + // We are intereseted in Lo part + return DAG.getNode(ISD::EXTRACT_ELEMENT, DL, VT, Ret, + DAG.getIntPtrConstant(0, DL)); + } + } + + // The rest is considered legal + return SDValue(); +} + +SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { + // Lower the "add/sub/mul with overflow" instruction into a regular ins plus + // a "setcc" instruction that checks the overflow flag. The "brcond" lowering + // looks for this combo and may remove the "setcc" instruction if the "setcc" + // has only one use. 
+ SDNode *N = Op.getNode(); + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + unsigned BaseOp = 0; + unsigned Cond = 0; + SDLoc DL(Op); + switch (Op.getOpcode()) { + default: + llvm_unreachable("Unknown ovf instruction!"); + case ISD::SADDO: + BaseOp = M68kISD::ADD; + Cond = M68k::COND_VS; + break; + case ISD::UADDO: + BaseOp = M68kISD::ADD; + Cond = M68k::COND_CS; + break; + case ISD::SSUBO: + BaseOp = M68kISD::SUB; + Cond = M68k::COND_VS; + break; + case ISD::USUBO: + BaseOp = M68kISD::SUB; + Cond = M68k::COND_CS; + break; + // case ISD::SMULO: + // BaseOp = M68kISD::SMUL; + // Cond = M68k::COND_VS; + // break; + // case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul + // lhs,rhs + // SDVTList VTs = + // DAG.getVTList(N->getValueType(0), N->getValueType(0), MVT::i8); + // SDValue Sum = DAG.getNode(M68kISD::UMUL, DL, VTs, LHS, RHS); + // + // SDValue SetCC = + // DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1), + // DAG.getConstant(M68k::COND_VS, DL, MVT::i8), + // SDValue(Sum.getNode(), 2)); + // + // return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); + // } + } + + // Also sets CCR. + SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i8); + SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); + + SDValue SetCC = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1), + DAG.getConstant(Cond, DL, MVT::i8), + SDValue(Sum.getNode(), 1)); + + return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); +} + +/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition +/// according to equal/not-equal condition code \p CC. +static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC, + const SDLoc &DL, SelectionDAG &DAG) { + // If Src is i8, promote it to i32 with any_extend. There is no i8 BT + // instruction. Since the shift amount is in-range-or-undefined, we know + // that doing a bittest on the i32 value is ok. 
+ if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16) + Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src); + + // If the operand types disagree, extend the shift amount to match. Since + // BT ignores high bits (like shifts) we can use anyextend. + if (Src.getValueType() != BitNo.getValueType()) + BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo); + + SDValue BT = DAG.getNode(M68kISD::BT, DL, MVT::i32, Src, BitNo); + + // NOTE BTST sets CCR.Z flag + M68k::CondCode Cond = CC == ISD::SETEQ ? M68k::COND_NE : M68k::COND_EQ; + return DAG.getNode(M68kISD::SETCC, DL, MVT::i8, + DAG.getConstant(Cond, DL, MVT::i8), BT); +} + +/// Result of 'and' is compared against zero. Change to a BT node if possible. +static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL, + SelectionDAG &DAG) { + SDValue Op0 = And.getOperand(0); + SDValue Op1 = And.getOperand(1); + if (Op0.getOpcode() == ISD::TRUNCATE) + Op0 = Op0.getOperand(0); + if (Op1.getOpcode() == ISD::TRUNCATE) + Op1 = Op1.getOperand(0); + + SDValue LHS, RHS; + if (Op1.getOpcode() == ISD::SHL) + std::swap(Op0, Op1); + if (Op0.getOpcode() == ISD::SHL) { + if (isOneConstant(Op0.getOperand(0))) { + // If we looked past a truncate, check that it's only truncating away + // known zeros. + unsigned BitWidth = Op0.getValueSizeInBits(); + unsigned AndBitWidth = And.getValueSizeInBits(); + if (BitWidth > AndBitWidth) { + auto Known = DAG.computeKnownBits(Op0); + if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth) + return SDValue(); + } + LHS = Op1; + RHS = Op0.getOperand(1); + } + } else if (Op1.getOpcode() == ISD::Constant) { + ConstantSDNode *AndRHS = cast(Op1); + uint64_t AndRHSVal = AndRHS->getZExtValue(); + SDValue AndLHS = Op0; + + if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { + LHS = AndLHS.getOperand(0); + RHS = AndLHS.getOperand(1); + } + + // Use BT if the immediate can't be encoded in a TEST instruction. 
+ if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { + LHS = AndLHS; + RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType()); + } + } + + if (LHS.getNode()) + return getBitTestCondition(LHS, RHS, CC, DL, DAG); + + return SDValue(); +} + +static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode) { + switch (SetCCOpcode) { + default: + llvm_unreachable("Invalid integer condition!"); + case ISD::SETEQ: + return M68k::COND_EQ; + case ISD::SETGT: + return M68k::COND_GT; + case ISD::SETGE: + return M68k::COND_GE; + case ISD::SETLT: + return M68k::COND_LT; + case ISD::SETLE: + return M68k::COND_LE; + case ISD::SETNE: + return M68k::COND_NE; + case ISD::SETULT: + return M68k::COND_CS; + case ISD::SETUGE: + return M68k::COND_CC; + case ISD::SETUGT: + return M68k::COND_HI; + case ISD::SETULE: + return M68k::COND_LS; + } +} + +/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific +/// condition code, returning the condition code and the LHS/RHS of the +/// comparison to make. +static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL, + bool isFP, SDValue &LHS, SDValue &RHS, + SelectionDAG &DAG) { + if (!isFP) { + if (ConstantSDNode *RHSC = dyn_cast(RHS)) { + if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { + // X > -1 -> X == 0, jump !sign. + RHS = DAG.getConstant(0, DL, RHS.getValueType()); + return M68k::COND_PL; + } + if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { + // X < 0 -> X == 0, jump on sign. + return M68k::COND_MI; + } + if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { + // X < 1 -> X <= 0 + RHS = DAG.getConstant(0, DL, RHS.getValueType()); + return M68k::COND_LE; + } + } + + return TranslateIntegerM68kCC(SetCCOpcode); + } + + // First determine if it is required or is profitable to flip the operands. + + // If LHS is a foldable load, but RHS is not, flip the condition. 
+ if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) { + SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); + std::swap(LHS, RHS); + } + + switch (SetCCOpcode) { + default: + break; + case ISD::SETOLT: + case ISD::SETOLE: + case ISD::SETUGT: + case ISD::SETUGE: + std::swap(LHS, RHS); + break; + } + + // On a floating point condition, the flags are set as follows: + // ZF PF CF op + // 0 | 0 | 0 | X > Y + // 0 | 0 | 1 | X < Y + // 1 | 0 | 0 | X == Y + // 1 | 1 | 1 | unordered + switch (SetCCOpcode) { + default: + llvm_unreachable("Condcode should be pre-legalized away"); + case ISD::SETUEQ: + case ISD::SETEQ: + return M68k::COND_EQ; + case ISD::SETOLT: // flipped + case ISD::SETOGT: + case ISD::SETGT: + return M68k::COND_HI; + case ISD::SETOLE: // flipped + case ISD::SETOGE: + case ISD::SETGE: + return M68k::COND_CC; + case ISD::SETUGT: // flipped + case ISD::SETULT: + case ISD::SETLT: + return M68k::COND_CS; + case ISD::SETUGE: // flipped + case ISD::SETULE: + case ISD::SETLE: + return M68k::COND_LS; + case ISD::SETONE: + case ISD::SETNE: + return M68k::COND_NE; + // case ISD::SETUO: return M68k::COND_P; + // case ISD::SETO: return M68k::COND_NP; + case ISD::SETOEQ: + case ISD::SETUNE: + return M68k::COND_INVALID; + } +} + +// Convert (truncate (srl X, N) to i1) to (bt X, N) +static SDValue LowerTruncateToBT(SDValue Op, ISD::CondCode CC, const SDLoc &DL, + SelectionDAG &DAG) { + + assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 && + "Expected TRUNCATE to i1 node"); + + if (Op.getOperand(0).getOpcode() != ISD::SRL) + return SDValue(); + + SDValue ShiftRight = Op.getOperand(0); + return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1), + CC, DL, DAG); +} + +/// \brief return true if \c Op has a use that doesn't just read flags. 
+static bool hasNonFlagsUse(SDValue Op) { + for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE; + ++UI) { + SDNode *User = *UI; + unsigned UOpNo = UI.getOperandNo(); + if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { + // Look pass truncate. + UOpNo = User->use_begin().getOperandNo(); + User = *User->use_begin(); + } + + if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC && + !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) + return true; + } + return false; +} + +SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC, + const SDLoc &DL, SelectionDAG &DAG) const { + + // CF and OF aren't always set the way we want. Determine which + // of these we need. + bool NeedCF = false; + bool NeedOF = false; + switch (M68kCC) { + default: + break; + case M68k::COND_HI: + case M68k::COND_CC: + case M68k::COND_CS: + case M68k::COND_LS: + NeedCF = true; + break; + case M68k::COND_GT: + case M68k::COND_GE: + case M68k::COND_LT: + case M68k::COND_LE: + case M68k::COND_VS: + case M68k::COND_VC: { + // Check if we really need to set the + // Overflow flag. If NoSignedWrap is present + // that is not actually needed. + switch (Op->getOpcode()) { + case ISD::ADD: + case ISD::SUB: + case ISD::MUL: + case ISD::SHL: { + if (Op.getNode()->getFlags().hasNoSignedWrap()) + break; + LLVM_FALLTHROUGH; + } + default: + NeedOF = true; + break; + } + break; + } + } + // See if we can use the CCR value from the operand instead of + // doing a separate TEST. TEST always sets OF and CF to 0, so unless + // we prove that the arithmetic won't overflow, we can't use OF or CF. + if (Op.getResNo() != 0 || NeedOF || NeedCF) { + // Emit a CMP with 0, which is the TEST pattern. 
+ return DAG.getNode(M68kISD::CMP, DL, MVT::i8, + DAG.getConstant(0, DL, Op.getValueType()), Op); + } + unsigned Opcode = 0; + unsigned NumOperands = 0; + + // Truncate operations may prevent the merge of the SETCC instruction + // and the arithmetic instruction before it. Attempt to truncate the operands + // of the arithmetic instruction and use a reduced bit-width instruction. + bool NeedTruncation = false; + SDValue ArithOp = Op; + if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) { + SDValue Arith = Op->getOperand(0); + // Both the trunc and the arithmetic op need to have one user each. + if (Arith->hasOneUse()) + switch (Arith.getOpcode()) { + default: + break; + case ISD::ADD: + case ISD::SUB: + case ISD::AND: + case ISD::OR: + case ISD::XOR: { + NeedTruncation = true; + ArithOp = Arith; + } + } + } + + // NOTICE: In the code below we use ArithOp to hold the arithmetic operation + // which may be the result of a CAST. We use the variable 'Op', which is the + // non-casted variable when we check for possible users. + switch (ArithOp.getOpcode()) { + case ISD::ADD: + Opcode = M68kISD::ADD; + NumOperands = 2; + break; + case ISD::SHL: + case ISD::SRL: + // If we have a constant logical shift that's only used in a comparison + // against zero turn it into an equivalent AND. This allows turning it into + // a TEST instruction later. + if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) && + Op->hasOneUse() && isa(Op->getOperand(1)) && + !hasNonFlagsUse(Op)) { + EVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); + unsigned ShAmt = Op->getConstantOperandVal(1); + if (ShAmt >= BitWidth) // Avoid undefined shifts. + break; + APInt Mask = ArithOp.getOpcode() == ISD::SRL + ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt) + : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt); + if (!Mask.isSignedIntN(32)) // Avoid large immediates. 
+ break; + Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0), + DAG.getConstant(Mask, DL, VT)); + } + break; + + case ISD::AND: + // If the primary 'and' result isn't used, don't bother using + // M68kISD::AND, because a TEST instruction will be better. + if (!hasNonFlagsUse(Op)) { + SDValue Op0 = ArithOp->getOperand(0); + SDValue Op1 = ArithOp->getOperand(1); + EVT VT = ArithOp.getValueType(); + bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1); + bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64; + + // But if we can combine this into an ANDN operation, then create an AND + // now and allow it to be pattern matched into an ANDN. + if (/*!Subtarget.hasBMI() ||*/ !isAndn || !isLegalAndnType) + break; + } + LLVM_FALLTHROUGH; + case ISD::SUB: + case ISD::OR: + case ISD::XOR: + // Due to the ISEL shortcoming noted above, be conservative if this op is + // likely to be selected as part of a load-modify-store instruction. + for (SDNode::use_iterator UI = Op.getNode()->use_begin(), + UE = Op.getNode()->use_end(); + UI != UE; ++UI) + if (UI->getOpcode() == ISD::STORE) + goto default_case; + + // Otherwise use a regular CCR-setting instruction. + switch (ArithOp.getOpcode()) { + default: + llvm_unreachable("unexpected operator!"); + case ISD::SUB: + Opcode = M68kISD::SUB; + break; + case ISD::XOR: + Opcode = M68kISD::XOR; + break; + case ISD::AND: + Opcode = M68kISD::AND; + break; + case ISD::OR: + Opcode = M68kISD::OR; + break; + } + + NumOperands = 2; + break; + case M68kISD::ADD: + case M68kISD::SUB: + case M68kISD::OR: + case M68kISD::XOR: + case M68kISD::AND: + return SDValue(Op.getNode(), 1); + default: + default_case: + break; + } + + // If we found that truncation is beneficial, perform the truncation and + // update 'Op'. 
+ if (NeedTruncation) { + EVT VT = Op.getValueType(); + SDValue WideVal = Op->getOperand(0); + EVT WideVT = WideVal.getValueType(); + unsigned ConvertedOp = 0; + // Use a target machine opcode to prevent further DAGCombine + // optimizations that may separate the arithmetic operations + // from the setcc node. + switch (WideVal.getOpcode()) { + default: + break; + case ISD::ADD: + ConvertedOp = M68kISD::ADD; + break; + case ISD::SUB: + ConvertedOp = M68kISD::SUB; + break; + case ISD::AND: + ConvertedOp = M68kISD::AND; + break; + case ISD::OR: + ConvertedOp = M68kISD::OR; + break; + case ISD::XOR: + ConvertedOp = M68kISD::XOR; + break; + } + + if (ConvertedOp) { + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { + SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0)); + SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1)); + Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1); + } + } + } + + if (Opcode == 0) { + // Emit a CMP with 0, which is the TEST pattern. + return DAG.getNode(M68kISD::CMP, DL, MVT::i8, + DAG.getConstant(0, DL, Op.getValueType()), Op); + } + SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8); + SmallVector Ops(Op->op_begin(), Op->op_begin() + NumOperands); + + SDValue New = DAG.getNode(Opcode, DL, VTs, Ops); + DAG.ReplaceAllUsesWith(Op, New); + return SDValue(New.getNode(), 1); +} + +/// \brief Return true if the condition is an unsigned comparison operation. 
+static bool isM68kCCUnsigned(unsigned M68kCC) { + switch (M68kCC) { + default: + llvm_unreachable("Invalid integer condition!"); + case M68k::COND_EQ: + case M68k::COND_NE: + case M68k::COND_CS: + case M68k::COND_HI: + case M68k::COND_LS: + case M68k::COND_CC: + return true; + case M68k::COND_GT: + case M68k::COND_GE: + case M68k::COND_LT: + case M68k::COND_LE: + return false; + } +} + +SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC, + const SDLoc &DL, SelectionDAG &DAG) const { + if (isNullConstant(Op1)) + return EmitTest(Op0, M68kCC, DL, DAG); + + assert(!(isa(Op1) && Op0.getValueType() == MVT::i1) && + "Unexpected comparison operation for MVT::i1 operands"); + + if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || + Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { + // Only promote the compare up to I32 if it is a 16 bit operation + // with an immediate. 16 bit immediates are to be avoided. + if ((Op0.getValueType() == MVT::i16 && + (isa(Op0) || isa(Op1))) && + !DAG.getMachineFunction().getFunction().hasMinSize()) { + unsigned ExtendOp = + isM68kCCUnsigned(M68kCC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; + Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0); + Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1); + } + // Use SUB instead of CMP to enable CSE between SUB and CMP. + SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8); + SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1); + return SDValue(Sub.getNode(), 1); + } + return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1); +} + +/// Result of 'and' or 'trunc to i1' is compared against zero. +/// Change to a BT node if possible. 
+SDValue M68kTargetLowering::LowerToBT(SDValue Op, ISD::CondCode CC, + const SDLoc &DL, + SelectionDAG &DAG) const { + if (Op.getOpcode() == ISD::AND) + return LowerAndToBT(Op, CC, DL, DAG); + if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1) + return LowerTruncateToBT(Op, CC, DL, DAG); + return SDValue(); +} + +SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { + MVT VT = Op.getSimpleValueType(); + assert(VT == MVT::i8 && "SetCC type must be 8-bit integer"); + + SDValue Op0 = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); + SDLoc DL(Op); + ISD::CondCode CC = cast(Op.getOperand(2))->get(); + + // Optimize to BT if possible. + // Lower (X & (1 << N)) == 0 to BT(X, N). + // Lower ((X >>u N) & 1) != 0 to BT(X, N). + // Lower ((X >>s N) & 1) != 0 to BT(X, N). + // Lower (trunc (X >> N) to i1) to BT(X, N). + if (Op0.hasOneUse() && isNullConstant(Op1) && + (CC == ISD::SETEQ || CC == ISD::SETNE)) { + if (SDValue NewSetCC = LowerToBT(Op0, CC, DL, DAG)) { + if (VT == MVT::i1) + return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC); + return NewSetCC; + } + } + + // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of + // these. + if ((isOneConstant(Op1) || isNullConstant(Op1)) && + (CC == ISD::SETEQ || CC == ISD::SETNE)) { + + // If the input is a setcc, then reuse the input setcc or use a new one with + // the inverted condition. 
+ if (Op0.getOpcode() == M68kISD::SETCC) { + M68k::CondCode CCode = (M68k::CondCode)Op0.getConstantOperandVal(0); + bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1); + if (!Invert) + return Op0; + + CCode = M68k::GetOppositeBranchCondition(CCode); + SDValue SetCC = + DAG.getNode(M68kISD::SETCC, DL, MVT::i8, + DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1)); + if (VT == MVT::i1) + return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); + return SetCC; + } + } + if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) { + if (isOneConstant(Op1)) { + // FIXME: See be15dfa88fb1 and a0f4600f4f0ec + ISD::CondCode NewCC = ISD::GlobalISel::getSetCCInverse(CC, true); + return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC); + } + if (!isNullConstant(Op1)) { + SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1); + return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC); + } + } + + bool isFP = Op1.getSimpleValueType().isFloatingPoint(); + unsigned M68kCC = TranslateM68kCC(CC, DL, isFP, Op0, Op1, DAG); + if (M68kCC == M68k::COND_INVALID) + return SDValue(); + + SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG); + // CCR = ConvertCmpIfNecessary(CCR, DAG); + return DAG.getNode(M68kISD::SETCC, DL, MVT::i8, + DAG.getConstant(M68kCC, DL, MVT::i8), CCR); +} + +SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op, + SelectionDAG &DAG) const { + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue Carry = Op.getOperand(2); + SDValue Cond = Op.getOperand(3); + SDLoc DL(Op); + + assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only."); + M68k::CondCode CC = TranslateIntegerM68kCC(cast(Cond)->get()); + + EVT CarryVT = Carry.getValueType(); + APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits()); + Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry, + DAG.getConstant(NegOne, DL, CarryVT)); + + SDVTList VTs = 
DAG.getVTList(LHS.getValueType(), MVT::i32); + SDValue Cmp = + DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1)); + + return DAG.getNode(M68kISD::SETCC, DL, MVT::i8, + DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1)); +} + +/// Return true if opcode is a M68k logical comparison. +static bool isM68kLogicalCmp(SDValue Op) { + unsigned Opc = Op.getNode()->getOpcode(); + if (Opc == M68kISD::CMP) + return true; + if (Op.getResNo() == 1 && + (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX || + Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL || + Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND)) + return true; + + if (Op.getResNo() == 2 && Opc == M68kISD::UMUL) + return true; + + return false; +} + +static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { + if (V.getOpcode() != ISD::TRUNCATE) + return false; + + SDValue VOp0 = V.getOperand(0); + unsigned InBits = VOp0.getValueSizeInBits(); + unsigned Bits = V.getValueSizeInBits(); + return DAG.MaskedValueIsZero(VOp0, + APInt::getHighBitsSet(InBits, InBits - Bits)); +} + +SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { + bool addTest = true; + SDValue Cond = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); + SDValue Op2 = Op.getOperand(2); + SDLoc DL(Op); + SDValue CC; + + if (Cond.getOpcode() == ISD::SETCC) { + if (SDValue NewCond = LowerSETCC(Cond, DAG)) + Cond = NewCond; + } + + // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y + // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y + // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y + // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y + if (Cond.getOpcode() == M68kISD::SETCC && + Cond.getOperand(1).getOpcode() == M68kISD::CMP && + isNullConstant(Cond.getOperand(1).getOperand(0))) { + SDValue Cmp = Cond.getOperand(1); + + unsigned CondCode = + cast(Cond.getOperand(0))->getZExtValue(); + + if ((isAllOnesConstant(Op1) || 
isAllOnesConstant(Op2)) && + (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) { + SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2; + + SDValue CmpOp0 = Cmp.getOperand(1); + // Apply further optimizations for special cases + // (select (x != 0), -1, 0) -> neg & sbb + // (select (x == 0), 0, -1) -> neg & sbb + if (isNullConstant(Y) && + (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) { + + SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); + + SDValue Neg = + DAG.getNode(M68kISD::SUB, DL, VTs, + DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0); + + SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(), + DAG.getConstant(M68k::COND_CS, DL, MVT::i8), + SDValue(Neg.getNode(), 1)); + return Res; + } + + Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8, + DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0); + // Cmp = ConvertCmpIfNecessary(Cmp, DAG); + + SDValue Res = // Res = 0 or -1. + DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(), + DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp); + + if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ)) + Res = DAG.getNOT(DL, Res, Res.getValueType()); + + if (!isNullConstant(Op2)) + Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); + return Res; + } + } + + // Look past (and (setcc_carry (cmp ...)), 1). + if (Cond.getOpcode() == ISD::AND && + Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY && + isOneConstant(Cond.getOperand(1))) + Cond = Cond.getOperand(0); + + // If condition flag is set by a M68kISD::CMP, then use it as the condition + // setting operand in place of the M68kISD::SETCC. 
+ unsigned CondOpcode = Cond.getOpcode(); + if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) { + CC = Cond.getOperand(0); + + SDValue Cmp = Cond.getOperand(1); + unsigned Opc = Cmp.getOpcode(); + + bool IllegalFPCMov = false; + + if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BT) { + Cond = Cmp; + addTest = false; + } + } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || + CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || + CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) { + SDValue LHS = Cond.getOperand(0); + SDValue RHS = Cond.getOperand(1); + unsigned MxOpcode; + unsigned MxCond; + SDVTList VTs; + switch (CondOpcode) { + case ISD::UADDO: + MxOpcode = M68kISD::ADD; + MxCond = M68k::COND_CS; + break; + case ISD::SADDO: + MxOpcode = M68kISD::ADD; + MxCond = M68k::COND_VS; + break; + case ISD::USUBO: + MxOpcode = M68kISD::SUB; + MxCond = M68k::COND_CS; + break; + case ISD::SSUBO: + MxOpcode = M68kISD::SUB; + MxCond = M68k::COND_VS; + break; + case ISD::UMULO: + MxOpcode = M68kISD::UMUL; + MxCond = M68k::COND_VS; + break; + case ISD::SMULO: + MxOpcode = M68kISD::SMUL; + MxCond = M68k::COND_VS; + break; + default: + llvm_unreachable("unexpected overflowing operator"); + } + if (CondOpcode == ISD::UMULO) + VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i32); + else + VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); + + SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS); + + if (CondOpcode == ISD::UMULO) + Cond = MxOp.getValue(2); + else + Cond = MxOp.getValue(1); + + CC = DAG.getConstant(MxCond, DL, MVT::i8); + addTest = false; + } + + if (addTest) { + // Look past the truncate if the high bits are known zero. + if (isTruncWithZeroHighBitsInput(Cond, DAG)) + Cond = Cond.getOperand(0); + + // We know the result of AND is compared against zero. Try to match + // it to BT. 
+ if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { + if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) { + CC = NewSetCC.getOperand(0); + Cond = NewSetCC.getOperand(1); + addTest = false; + } + } + } + + if (addTest) { + CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8); + Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG); + } + + // a < b ? -1 : 0 -> RES = ~setcc_carry + // a < b ? 0 : -1 -> RES = setcc_carry + // a >= b ? -1 : 0 -> RES = setcc_carry + // a >= b ? 0 : -1 -> RES = ~setcc_carry + if (Cond.getOpcode() == M68kISD::SUB) { + // Cond = ConvertCmpIfNecessary(Cond, DAG); + unsigned CondCode = cast(CC)->getZExtValue(); + + if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) && + (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) && + (isNullConstant(Op1) || isNullConstant(Op2))) { + SDValue Res = + DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(), + DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond); + if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS)) + return DAG.getNOT(DL, Res, Res.getValueType()); + return Res; + } + } + + // M68k doesn't have an i8 cmov. If both operands are the result of a + // truncate widen the cmov and push the truncate through. This avoids + // introducing a new branch during isel and doesn't add any extensions. + if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE && + Op2.getOpcode() == ISD::TRUNCATE) { + SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); + if (T1.getValueType() == T2.getValueType() && + // Blacklist CopyFromReg to avoid partial register stalls. + T1.getOpcode() != ISD::CopyFromReg && + T2.getOpcode() != ISD::CopyFromReg) { + SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); + SDValue Cmov = DAG.getNode(M68kISD::CMOV, DL, VTs, T2, T1, CC, Cond); + return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); + } + } + + // M68kISD::CMOV means set the result (which is operand 1) to the RHS if + // condition is true. 
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); + SDValue Ops[] = {Op2, Op1, CC, Cond}; + return DAG.getNode(M68kISD::CMOV, DL, VTs, Ops); +} + +/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes +/// each of which has no other use apart from the AND / OR. +static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { + Opc = Op.getOpcode(); + if (Opc != ISD::OR && Opc != ISD::AND) + return false; + return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) && + Op.getOperand(0).hasOneUse() && + M68k::IsSETCC(Op.getOperand(1).getOpcode()) && + Op.getOperand(1).hasOneUse()); +} + +/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the +/// SETCC node has a single use. +static bool isXor1OfSetCC(SDValue Op) { + if (Op.getOpcode() != ISD::XOR) + return false; + if (isOneConstant(Op.getOperand(1))) + return Op.getOperand(0).getOpcode() == M68kISD::SETCC && + Op.getOperand(0).hasOneUse(); + return false; +} + +SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { + bool addTest = true; + SDValue Chain = Op.getOperand(0); + SDValue Cond = Op.getOperand(1); + SDValue Dest = Op.getOperand(2); + SDLoc DL(Op); + SDValue CC; + bool Inverted = false; + + if (Cond.getOpcode() == ISD::SETCC) { + // Check for setcc([su]{add,sub,mul}o == 0). + if (cast(Cond.getOperand(2))->get() == ISD::SETEQ && + isNullConstant(Cond.getOperand(1)) && + Cond.getOperand(0).getResNo() == 1 && + (Cond.getOperand(0).getOpcode() == ISD::SADDO || + Cond.getOperand(0).getOpcode() == ISD::UADDO || + Cond.getOperand(0).getOpcode() == ISD::SSUBO || + Cond.getOperand(0).getOpcode() == ISD::USUBO /*|| + Cond.getOperand(0).getOpcode() == ISD::SMULO || + Cond.getOperand(0).getOpcode() == ISD::UMULO)*/)) { + Inverted = true; + Cond = Cond.getOperand(0); + } else { + if (SDValue NewCond = LowerSETCC(Cond, DAG)) + Cond = NewCond; + } + } +#if 0 + // FIXME: LowerXALUO doesn't handle these!! 
+ else if (Cond.getOpcode() == M68kISD::ADD || + Cond.getOpcode() == M68kISD::SUB || + Cond.getOpcode() == M68kISD::SMUL || + Cond.getOpcode() == M68kISD::UMUL) + Cond = LowerXALUO(Cond, DAG); +#endif + + // Look pass (and (setcc_carry (cmp ...)), 1). + if (Cond.getOpcode() == ISD::AND && + Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY && + isOneConstant(Cond.getOperand(1))) + Cond = Cond.getOperand(0); + + // If condition flag is set by a M68kISD::CMP, then use it as the condition + // setting operand in place of the M68kISD::SETCC. + unsigned CondOpcode = Cond.getOpcode(); + if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) { + CC = Cond.getOperand(0); + + SDValue Cmp = Cond.getOperand(1); + unsigned Opc = Cmp.getOpcode(); + + if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BT) { + Cond = Cmp; + addTest = false; + } else { + switch (cast(CC)->getZExtValue()) { + default: + break; + case M68k::COND_VS: + case M68k::COND_CS: + // These can only come from an arithmetic instruction with overflow, + // e.g. SADDO, UADDO. + Cond = Cond.getNode()->getOperand(1); + addTest = false; + break; + } + } + } + CondOpcode = Cond.getOpcode(); + if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || + CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO /*|| + CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO*/) { + SDValue LHS = Cond.getOperand(0); + SDValue RHS = Cond.getOperand(1); + unsigned MxOpcode; + unsigned MxCond; + SDVTList VTs; + // Keep this in sync with LowerXALUO, otherwise we might create redundant + // instructions that can't be removed afterwards (i.e. M68kISD::ADD and + // M68kISD::INC). 
+ switch (CondOpcode) { + case ISD::UADDO: + MxOpcode = M68kISD::ADD; + MxCond = M68k::COND_CS; + break; + case ISD::SADDO: + MxOpcode = M68kISD::ADD; + MxCond = M68k::COND_VS; + break; + case ISD::USUBO: + MxOpcode = M68kISD::SUB; + MxCond = M68k::COND_CS; + break; + case ISD::SSUBO: + MxOpcode = M68kISD::SUB; + MxCond = M68k::COND_VS; + break; + case ISD::UMULO: + MxOpcode = M68kISD::UMUL; + MxCond = M68k::COND_VS; + break; + case ISD::SMULO: + MxOpcode = M68kISD::SMUL; + MxCond = M68k::COND_VS; + break; + default: + llvm_unreachable("unexpected overflowing operator"); + } + + if (Inverted) + MxCond = M68k::GetOppositeBranchCondition((M68k::CondCode)MxCond); + + if (CondOpcode == ISD::UMULO) + VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i8); + else + VTs = DAG.getVTList(LHS.getValueType(), MVT::i8); + + SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS); + + if (CondOpcode == ISD::UMULO) + Cond = MxOp.getValue(2); + else + Cond = MxOp.getValue(1); + + CC = DAG.getConstant(MxCond, DL, MVT::i8); + addTest = false; + } else { + unsigned CondOpc; + if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { + SDValue Cmp = Cond.getOperand(0).getOperand(1); + if (CondOpc == ISD::OR) { + // Also, recognize the pattern generated by an FCMP_UNE. We can emit + // two branches instead of an explicit OR instruction with a + // separate test. + if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) { + CC = Cond.getOperand(0).getOperand(0); + Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, + Dest, CC, Cmp); + CC = Cond.getOperand(1).getOperand(0); + Cond = Cmp; + addTest = false; + } + } else { // ISD::AND + // Also, recognize the pattern generated by an FCMP_OEQ. We can emit + // two branches instead of an explicit AND instruction with a + // separate test. However, we only do this if this block doesn't + // have a fall-through edge, because this requires an explicit + // jmp when the condition is false. 
+ if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) && + Op.getNode()->hasOneUse()) { + M68k::CondCode CCode = + (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0); + CCode = M68k::GetOppositeBranchCondition(CCode); + CC = DAG.getConstant(CCode, DL, MVT::i8); + SDNode *User = *Op.getNode()->use_begin(); + // Look for an unconditional branch following this conditional branch. + // We need this because we need to reverse the successors in order + // to implement FCMP_OEQ. + if (User->getOpcode() == ISD::BR) { + SDValue FalseBB = User->getOperand(1); + SDNode *NewBR = + DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); + assert(NewBR == User); + (void)NewBR; + Dest = FalseBB; + + Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, + Dest, CC, Cmp); + M68k::CondCode CCode = + (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0); + CCode = M68k::GetOppositeBranchCondition(CCode); + CC = DAG.getConstant(CCode, DL, MVT::i8); + Cond = Cmp; + addTest = false; + } + } + } + } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { + // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. + // It should be transformed during dag combiner except when the condition + // is set by a arithmetics with overflow node. + M68k::CondCode CCode = + (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0); + CCode = M68k::GetOppositeBranchCondition(CCode); + CC = DAG.getConstant(CCode, DL, MVT::i8); + Cond = Cond.getOperand(0).getOperand(1); + addTest = false; + } /*else if (Cond.getOpcode() == ISD::SETCC && + cast(Cond.getOperand(2))->get() == ISD::SETOEQ) { + // For FCMP_OEQ, we can emit + // two branches instead of an explicit AND instruction with a + // separate test. However, we only do this if this block doesn't + // have a fall-through edge, because this requires an explicit + // jmp when the condition is false. 
+ if (Op.getNode()->hasOneUse()) { + SDNode *User = *Op.getNode()->use_begin(); + // Look for an unconditional branch following this conditional branch. + // We need this because we need to reverse the successors in order + // to implement FCMP_OEQ. + if (User->getOpcode() == ISD::BR) { + SDValue FalseBB = User->getOperand(1); + SDNode *NewBR = + DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); + assert(NewBR == User); + (void)NewBR; + Dest = FalseBB; + + SDValue Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i32, + Cond.getOperand(0), Cond.getOperand(1)); + // Cmp = ConvertCmpIfNecessary(Cmp, DAG); + CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8); + Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), + Chain, Dest, CC, Cmp); + CC = DAG.getConstant(M68k::COND_P, DL, MVT::i8); + Cond = Cmp; + addTest = false; + } + } + } else if (Cond.getOpcode() == ISD::SETCC && + cast(Cond.getOperand(2))->get() == ISD::SETUNE) { + // For FCMP_UNE, we can emit + // two branches instead of an explicit AND instruction with a + // separate test. However, we only do this if this block doesn't + // have a fall-through edge, because this requires an explicit + // jmp when the condition is false. + if (Op.getNode()->hasOneUse()) { + SDNode *User = *Op.getNode()->use_begin(); + // Look for an unconditional branch following this conditional branch. + // We need this because we need to reverse the successors in order + // to implement FCMP_UNE. 
+ if (User->getOpcode() == ISD::BR) { + SDValue FalseBB = User->getOperand(1); + SDNode *NewBR = + DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); + assert(NewBR == User); + (void)NewBR; + + SDValue Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i32, + Cond.getOperand(0), Cond.getOperand(1)); + Cmp = ConvertCmpIfNecessary(Cmp, DAG); + CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8); + Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), + Chain, Dest, CC, Cmp); + CC = DAG.getConstant(M68k::COND_NP, DL, MVT::i8); + Cond = Cmp; + addTest = false; + Dest = FalseBB; + } + } + } */ + } + + if (addTest) { + // Look pass the truncate if the high bits are known zero. + if (isTruncWithZeroHighBitsInput(Cond, DAG)) + Cond = Cond.getOperand(0); + + // We know the result is compared against zero. Try to match it to BT. + if (Cond.hasOneUse()) { + if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) { + CC = NewSetCC.getOperand(0); + Cond = NewSetCC.getOperand(1); + addTest = false; + } + } + } + + if (addTest) { + M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE; + CC = DAG.getConstant(MxCond, DL, MVT::i8); + Cond = EmitTest(Cond, MxCond, DL, DAG); + } + // Cond = ConvertCmpIfNecessary(Cond, DAG); + return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC, + Cond); +} + +SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op, + SelectionDAG &DAG) const { + MVT VT = Op.getNode()->getSimpleValueType(0); + + // Let legalize expand this if it isn't a legal type yet. 
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) + return SDValue(); + + SDVTList VTs = DAG.getVTList(VT, MVT::i8); + + unsigned Opc; + bool ExtraOp = false; + switch (Op.getOpcode()) { + default: + llvm_unreachable("Invalid code"); + case ISD::ADDC: + Opc = M68kISD::ADD; + break; + case ISD::ADDE: + Opc = M68kISD::ADDX; + ExtraOp = true; + break; + case ISD::SUBC: + Opc = M68kISD::SUB; + break; + case ISD::SUBE: + Opc = M68kISD::SUBX; + ExtraOp = true; + break; + } + + if (!ExtraOp) + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), + Op.getOperand(2)); +} + +// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as +// their target countpart wrapped in the M68kISD::Wrapper node. Suppose N is +// one of the above mentioned nodes. It has to be wrapped because otherwise +// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only +// be used to form addressing mode. These wrapped nodes will be selected +// into MOV32ri. +SDValue M68kTargetLowering::LowerConstantPool(SDValue Op, + SelectionDAG &DAG) const { + ConstantPoolSDNode *CP = cast(Op); + + // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the + // global base reg. + unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr); + + unsigned WrapperKind = M68kISD::Wrapper; + if (M68kII::isPCRelGlobalReference(OpFlag)) { + WrapperKind = M68kISD::WrapperPC; + } + + auto PtrVT = getPointerTy(DAG.getDataLayout()); + SDValue Result = DAG.getTargetConstantPool( + CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag); + + SDLoc DL(CP); + Result = DAG.getNode(WrapperKind, DL, PtrVT, Result); + + // With PIC, the address is actually $g + Offset. 
+ if (M68kII::isGlobalRelativeToPICBase(OpFlag)) { + Result = DAG.getNode(ISD::ADD, DL, PtrVT, + DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT), + Result); + } + + return Result; +} + +SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op, + SelectionDAG &DAG) const { + const char *Sym = cast(Op)->getSymbol(); + + // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the + // global base reg. + const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); + unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod); + + unsigned WrapperKind = M68kISD::Wrapper; + if (M68kII::isPCRelGlobalReference(OpFlag)) { + WrapperKind = M68kISD::WrapperPC; + } + + auto PtrVT = getPointerTy(DAG.getDataLayout()); + SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag); + + SDLoc DL(Op); + Result = DAG.getNode(WrapperKind, DL, PtrVT, Result); + + // With PIC, the address is actually $g + Offset. + if (M68kII::isGlobalRelativeToPICBase(OpFlag)) { + Result = DAG.getNode(ISD::ADD, DL, PtrVT, + DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT), + Result); + } + + // For symbols that require a load from a stub to get the address, emit the + // load. + if (M68kII::isGlobalStubReference(OpFlag)) { + Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, + MachinePointerInfo::getGOT(DAG.getMachineFunction())); + } + + return Result; +} + +SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op, + SelectionDAG &DAG) const { + unsigned char OpFlags = Subtarget.classifyBlockAddressReference(); + const BlockAddress *BA = cast(Op)->getBlockAddress(); + int64_t Offset = cast(Op)->getOffset(); + SDLoc DL(Op); + auto PtrVT = getPointerTy(DAG.getDataLayout()); + + // Create the TargetBlockAddressAddress node. 
+ SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags); + + if (M68kII::isPCRelBlockReference(OpFlags)) { + Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result); + } else { + Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result); + } + + // With PIC, the address is actually $g + Offset. + if (M68kII::isGlobalRelativeToPICBase(OpFlags)) { + Result = + DAG.getNode(ISD::ADD, DL, PtrVT, + DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result); + } + + return Result; +} + +SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV, + const SDLoc &DL, int64_t Offset, + SelectionDAG &DAG) const { + unsigned char OpFlags = Subtarget.classifyGlobalReference(GV); + auto PtrVT = getPointerTy(DAG.getDataLayout()); + + // Create the TargetGlobalAddress node, folding in the constant + // offset if it is legal. + SDValue Result; + if (M68kII::isDirectGlobalReference(OpFlags)) { + Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset); + Offset = 0; + } else { + Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); + } + + if (M68kII::isPCRelGlobalReference(OpFlags)) + Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result); + else + Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result); + + // With PIC, the address is actually $g + Offset. + if (M68kII::isGlobalRelativeToPICBase(OpFlags)) { + Result = + DAG.getNode(ISD::ADD, DL, PtrVT, + DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result); + } + + // For globals that require a load from a stub to get the address, emit the + // load. + if (M68kII::isGlobalStubReference(OpFlags)) { + Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, + MachinePointerInfo::getGOT(DAG.getMachineFunction())); + } + + // If there was a non-zero offset that we didn't fold, create an explicit + // addition for it. 
+ if (Offset != 0) { + Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, + DAG.getConstant(Offset, DL, PtrVT)); + } + + return Result; +} + +SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + const GlobalValue *GV = cast(Op)->getGlobal(); + int64_t Offset = cast(Op)->getOffset(); + return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG); +} + +//===----------------------------------------------------------------------===// +// Custom Lower Jump Table +//===----------------------------------------------------------------------===// + +SDValue M68kTargetLowering::LowerJumpTable(SDValue Op, + SelectionDAG &DAG) const { + JumpTableSDNode *JT = cast(Op); + + // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the + // global base reg. + unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr); + + unsigned WrapperKind = M68kISD::Wrapper; + if (M68kII::isPCRelGlobalReference(OpFlag)) { + WrapperKind = M68kISD::WrapperPC; + } + + auto PtrVT = getPointerTy(DAG.getDataLayout()); + SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag); + SDLoc DL(JT); + Result = DAG.getNode(WrapperKind, DL, PtrVT, Result); + + // With PIC, the address is actually $g + Offset. 
+ if (M68kII::isGlobalRelativeToPICBase(OpFlag)) { + Result = DAG.getNode(ISD::ADD, DL, PtrVT, + DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT), + Result); + } + + return Result; +} + +unsigned M68kTargetLowering::getJumpTableEncoding() const { + return Subtarget.getJumpTableEncoding(); +} + +const MCExpr *M68kTargetLowering::LowerCustomJumpTableEntry( + const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, + unsigned uid, MCContext &Ctx) const { + return MCSymbolRefExpr::create(MBB->getSymbol(), MCSymbolRefExpr::VK_GOTOFF, + Ctx); +} + +SDValue M68kTargetLowering::getPICJumpTableRelocBase(SDValue Table, + SelectionDAG &DAG) const { + if (getJumpTableEncoding() == MachineJumpTableInfo::EK_Custom32) + return DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), + getPointerTy(DAG.getDataLayout())); + + // MachineJumpTableInfo::EK_LabelDifference32 entry + return Table; +} + +// NOTE This only used for MachineJumpTableInfo::EK_LabelDifference32 entries +const MCExpr *M68kTargetLowering::getPICJumpTableRelocBaseExpr( + const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const { + return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx); +} + +/// Determines whether the callee is required to pop its own arguments. +/// Callee pop is necessary to support tail calls. +bool M68k::isCalleePop(CallingConv::ID CallingConv, bool IsVarArg, + bool GuaranteeTCO) { + // FIXME #7 RTD is not available untill M68010 + return false; + // // If GuaranteeTCO is true, we force some calls to be callee pop so that we + // // can guarantee TCO. 
+ // if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO)) + // return true; + // + // switch (CallingConv) { + // default: + // return false; + // case CallingConv::M68k_StdCall: + // case CallingConv::M68k_FastCall: + // case CallingConv::M68k_ThisCall: + // case CallingConv::M68k_VectorCall: + // return !is64Bit; + // } +} + +// Return true if it is OK for this CMOV pseudo-opcode to be cascaded +// together with other CMOV pseudo-opcodes into a single basic-block with +// conditional jump around it. +static bool isCMOVPseudo(MachineInstr &MI) { + switch (MI.getOpcode()) { + case M68k::CMOV8d: + case M68k::CMOV16d: + case M68k::CMOV32r: + return true; + + default: + return false; + } +} + +// The CCR operand of SelectItr might be missing a kill marker +// because there were multiple uses of CCR, and ISel didn't know +// which to mark. Figure out whether SelectItr should have had a +// kill marker, and set it if it should. Returns the correct kill +// marker value. +static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr, + MachineBasicBlock *BB, + const TargetRegisterInfo *TRI) { + // Scan forward through BB for a use/def of CCR. + MachineBasicBlock::iterator miI(std::next(SelectItr)); + for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { + const MachineInstr &mi = *miI; + if (mi.readsRegister(M68k::CCR)) + return false; + if (mi.definesRegister(M68k::CCR)) + break; // Should have kill-flag - update below. + } + + // If we hit the end of the block, check whether CCR is live into a + // successor. + if (miI == BB->end()) { + for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), + sEnd = BB->succ_end(); + sItr != sEnd; ++sItr) { + MachineBasicBlock *succ = *sItr; + if (succ->isLiveIn(M68k::CCR)) + return false; + } + } + + // We found a def, or hit the end of the basic block and CCR wasn't live + // out. SelectMI should have a kill flag on CCR. 
+ SelectItr->addRegisterKilled(M68k::CCR, TRI); + return true; +} + +MachineBasicBlock * +M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI, + MachineBasicBlock *BB) const { + const TargetInstrInfo *TII = Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + // To "insert" a SELECT_CC instruction, we actually have to insert the + // diamond control-flow pattern. The incoming instruction knows the + // destination vreg to set, the condition code register to branch on, the + // true/false values to select between, and a branch opcode to use. + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + // thisMBB: + // ... + // TrueVal = ... + // cmpTY ccX, r1, r2 + // bCC copy1MBB + // fallthrough --> copy0MBB + MachineBasicBlock *thisMBB = BB; + MachineFunction *F = BB->getParent(); + + // This code lowers all pseudo-CMOV instructions. Generally it lowers these + // as described above, by inserting a BB, and then making a PHI at the join + // point to select the true and false operands of the CMOV in the PHI. + // + // The code also handles two different cases of multiple CMOV opcodes + // in a row. + // + // Case 1: + // In this case, there are multiple CMOVs in a row, all which are based on + // the same condition setting (or the exact opposite condition setting). + // In this case we can lower all the CMOVs using a single inserted BB, and + // then make a number of PHIs at the join point to model the CMOVs. The only + // trickiness here, is that in a case like: + // + // t2 = CMOV cond1 t1, f1 + // t3 = CMOV cond1 t2, f2 + // + // when rewriting this into PHIs, we have to perform some renaming on the + // temps since you cannot have a PHI operand refer to a PHI result earlier + // in the same block. The "simple" but wrong lowering would be: + // + // t2 = PHI t1(BB1), f1(BB2) + // t3 = PHI t2(BB1), f2(BB2) + // + // but clearly t2 is not defined in BB1, so that is incorrect. 
The proper + // renaming is to note that on the path through BB1, t2 is really just a + // copy of t1, and do that renaming, properly generating: + // + // t2 = PHI t1(BB1), f1(BB2) + // t3 = PHI t1(BB1), f2(BB2) + // + // Case 2, we lower cascaded CMOVs such as + // + // (CMOV (CMOV F, T, cc1), T, cc2) + // + // to two successives branches. For that, we look for another CMOV as the + // following instruction. + // + // Without this, we would add a PHI between the two jumps, which ends up + // creating a few copies all around. For instance, for + // + // (sitofp (zext (fcmp une))) + // + // we would generate: + // + // ucomiss %xmm1, %xmm0 + // movss <1.0f>, %xmm0 + // movaps %xmm0, %xmm1 + // jne .LBB5_2 + // xorps %xmm1, %xmm1 + // .LBB5_2: + // jp .LBB5_4 + // movaps %xmm1, %xmm0 + // .LBB5_4: + // retq + // + // because this custom-inserter would have generated: + // + // A + // | \ + // | B + // | / + // C + // | \ + // | D + // | / + // E + // + // A: X = ...; Y = ... + // B: empty + // C: Z = PHI [X, A], [Y, B] + // D: empty + // E: PHI [X, C], [Z, D] + // + // If we lower both CMOVs in a single step, we can instead generate: + // + // A + // | \ + // | C + // | /| + // |/ | + // | | + // | D + // | / + // E + // + // A: X = ...; Y = ... + // D: empty + // E: PHI [X, A], [X, C], [Y, D] + // + // Which, in our sitofp/fcmp example, gives us something like: + // + // ucomiss %xmm1, %xmm0 + // movss <1.0f>, %xmm0 + // jne .LBB5_4 + // jp .LBB5_4 + // xorps %xmm0, %xmm0 + // .LBB5_4: + // retq + // + MachineInstr *CascadedCMOV = nullptr; + MachineInstr *LastCMOV = &MI; + M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm()); + M68k::CondCode OppCC = M68k::GetOppositeBranchCondition(CC); + MachineBasicBlock::iterator NextMIIt = + std::next(MachineBasicBlock::iterator(MI)); + + // Check for case 1, where there are multiple CMOVs with the same condition + // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the + // number of jumps the most. 
+ + if (isCMOVPseudo(MI)) { + // See if we have a string of CMOVS with the same condition. + while (NextMIIt != BB->end() && isCMOVPseudo(*NextMIIt) && + (NextMIIt->getOperand(3).getImm() == CC || + NextMIIt->getOperand(3).getImm() == OppCC)) { + LastCMOV = &*NextMIIt; + ++NextMIIt; + } + } + + // This checks for case 2, but only do this if we didn't already find + // case 1, as indicated by LastCMOV == MI. + if (LastCMOV == &MI && NextMIIt != BB->end() && + NextMIIt->getOpcode() == MI.getOpcode() && + NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() && + NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() && + NextMIIt->getOperand(1).isKill()) { + CascadedCMOV = &*NextMIIt; + } + + MachineBasicBlock *jcc1MBB = nullptr; + + // If we have a cascaded CMOV, we lower it to two successive branches to + // the same block. CCR is used by both, so mark it as live in the second. + if (CascadedCMOV) { + jcc1MBB = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, jcc1MBB); + jcc1MBB->addLiveIn(M68k::CCR); + } + + MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, copy0MBB); + F->insert(It, sinkMBB); + + // If the CCR register isn't dead in the terminator, then claim that it's + // live into the sink and copy blocks. + const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); + + MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV; + if (!LastCCRSUser->killsRegister(M68k::CCR) && + !checkAndUpdateCCRKill(LastCCRSUser, BB, TRI)) { + copy0MBB->addLiveIn(M68k::CCR); + sinkMBB->addLiveIn(M68k::CCR); + } + + // Transfer the remainder of BB and its successor edges to sinkMBB. + sinkMBB->splice(sinkMBB->begin(), BB, + std::next(MachineBasicBlock::iterator(LastCMOV)), BB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(BB); + + // Add the true and fallthrough blocks as its successors. 
+ if (CascadedCMOV) { + // The fallthrough block may be jcc1MBB, if we have a cascaded CMOV. + BB->addSuccessor(jcc1MBB); + + // In that case, jcc1MBB will itself fallthrough the copy0MBB, and + // jump to the sinkMBB. + jcc1MBB->addSuccessor(copy0MBB); + jcc1MBB->addSuccessor(sinkMBB); + } else { + BB->addSuccessor(copy0MBB); + } + + // The true block target of the first (or only) branch is always sinkMBB. + BB->addSuccessor(sinkMBB); + + // Create the conditional branch instruction. + unsigned Opc = M68k::GetCondBranchFromCond(CC); + BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); + + if (CascadedCMOV) { + unsigned Opc2 = M68k::GetCondBranchFromCond( + (M68k::CondCode)CascadedCMOV->getOperand(3).getImm()); + BuildMI(jcc1MBB, DL, TII->get(Opc2)).addMBB(sinkMBB); + } + + // copy0MBB: + // %FalseValue = ... + // # fallthrough to sinkMBB + copy0MBB->addSuccessor(sinkMBB); + + // sinkMBB: + // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] + // ... + MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI); + MachineBasicBlock::iterator MIItEnd = + std::next(MachineBasicBlock::iterator(LastCMOV)); + MachineBasicBlock::iterator SinkInsertionPoint = sinkMBB->begin(); + DenseMap> RegRewriteTable; + MachineInstrBuilder MIB; + + // As we are creating the PHIs, we have to be careful if there is more than + // one. Later CMOVs may reference the results of earlier CMOVs, but later + // PHIs have to reference the individual true/false inputs from earlier PHIs. + // That also means that PHI construction must work forward from earlier to + // later, and that the code must maintain a mapping from earlier PHI's + // destination registers, and the registers that went into the PHI. 
+ + for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) { + unsigned DestReg = MIIt->getOperand(0).getReg(); + unsigned Op1Reg = MIIt->getOperand(1).getReg(); + unsigned Op2Reg = MIIt->getOperand(2).getReg(); + + // If this CMOV we are generating is the opposite condition from + // the jump we generated, then we have to swap the operands for the + // PHI that is going to be generated. + if (MIIt->getOperand(3).getImm() == OppCC) + std::swap(Op1Reg, Op2Reg); + + if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end()) + Op1Reg = RegRewriteTable[Op1Reg].first; + + if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end()) + Op2Reg = RegRewriteTable[Op2Reg].second; + + MIB = + BuildMI(*sinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg) + .addReg(Op1Reg) + .addMBB(copy0MBB) + .addReg(Op2Reg) + .addMBB(thisMBB); + + // Add this PHI to the rewrite table. + RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg); + } + + // If we have a cascaded CMOV, the second Jcc provides the same incoming + // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes). + if (CascadedCMOV) { + MIB.addReg(MI.getOperand(2).getReg()).addMBB(jcc1MBB); + // Copy the PHI result to the register defined by the second CMOV. + BuildMI(*sinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), + DL, TII->get(TargetOpcode::COPY), + CascadedCMOV->getOperand(0).getReg()) + .addReg(MI.getOperand(0).getReg()); + CascadedCMOV->eraseFromParent(); + } + + // Now remove the CMOV(s). 
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
    (MIIt++)->eraseFromParent();

  return sinkMBB;
}

MachineBasicBlock *
M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
                                         MachineBasicBlock *BB) const {
  // FIXME #17 See Target TODO.md
  llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
}

MachineBasicBlock *
M68kTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case M68k::CMOV8d:
  case M68k::CMOV16d:
  case M68k::CMOV32r:
    return EmitLoweredSelect(MI, BB);
  case M68k::SALLOCA:
    return EmitLoweredSegAlloca(MI, BB);
  }
}

SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                    SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool SplitStack = MF.shouldSplitStack();

  SDLoc DL(Op);

  // Get the inputs.
+ SDNode *Node = Op.getNode(); + SDValue Chain = Op.getOperand(0); + SDValue Size = Op.getOperand(1); + unsigned Align = cast(Op.getOperand(2))->getZExtValue(); + EVT VT = Node->getValueType(0); + + // Chain the dynamic stack allocation so that it doesn't modify the stack + // pointer when other instructions are using the stack. + Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); + + SDValue Result; + if (SplitStack) { + auto &MRI = MF.getRegInfo(); + auto SPTy = getPointerTy(DAG.getDataLayout()); + auto *ARClass = getRegClassFor(SPTy); + unsigned Vreg = MRI.createVirtualRegister(ARClass); + Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size); + Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain, + DAG.getRegister(Vreg, SPTy)); + } else { + auto &TLI = DAG.getTargetLoweringInfo(); + unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); + assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" + " not tell us which reg is the stack pointer!"); + + SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT); + Chain = SP.getValue(1); + const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); + unsigned StackAlign = TFI.getStackAlignment(); + Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value + if (Align > StackAlign) + Result = DAG.getNode(ISD::AND, DL, VT, Result, + DAG.getConstant(-(uint64_t)Align, DL, VT)); + Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain + } + + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true), + DAG.getIntPtrConstant(0, DL, true), SDValue(), DL); + + SDValue Ops[2] = {Result, Chain}; + return DAG.getMergeValues(Ops, DL); +} + +//===----------------------------------------------------------------------===// +// DAG Combine +//===----------------------------------------------------------------------===// + +static SDValue getSETCC(M68k::CondCode Cond, SDValue CCR, const SDLoc &dl, + SelectionDAG &DAG) { + return DAG.getNode(M68kISD::SETCC, dl, MVT::i8, + 
DAG.getConstant(Cond, dl, MVT::i8), CCR); +} +// When legalizing carry, we create carries via add X, -1 +// If that comes from an actual carry, via setcc, we use the +// carry directly. +static SDValue combineCarryThroughADD(SDValue CCR) { + if (CCR.getOpcode() == M68kISD::ADD) { + if (isAllOnesConstant(CCR.getOperand(1))) { + SDValue Carry = CCR.getOperand(0); + while ( + Carry.getOpcode() == ISD::TRUNCATE || + Carry.getOpcode() == ISD::ZERO_EXTEND || + Carry.getOpcode() == ISD::SIGN_EXTEND || + Carry.getOpcode() == ISD::ANY_EXTEND || + (Carry.getOpcode() == ISD::AND && isOneConstant(Carry.getOperand(1)))) + Carry = Carry.getOperand(0); + if (Carry.getOpcode() == M68kISD::SETCC || + Carry.getOpcode() == M68kISD::SETCC_CARRY) { + if (Carry.getConstantOperandVal(0) == M68k::COND_CS) + return Carry.getOperand(1); + } + } + } + + return SDValue(); +} + +// Check whether a boolean test is testing a boolean value generated by +// M68kISD::SETCC. If so, return the operand of that SETCC and proper +// condition code. +// +// Simplify the following patterns: +// (Op (CMP (SETCC Cond CCR) 1) EQ) or +// (Op (CMP (SETCC Cond CCR) 0) NEQ) +// to (Op CCR Cond) +// +// (Op (CMP (SETCC Cond CCR) 0) EQ) or +// (Op (CMP (SETCC Cond CCR) 1) NEQ) +// to (Op CCR !Cond) +// +// where Op could be BRCOND or CMOV. +// +static SDValue checkBoolTestSetCCCombine(SDValue Cmp, M68k::CondCode &CC) { + // FIXME #18 Read through, make sure it fits m68k + // // This combine only operates on CMP-like nodes. + // if (!(Cmp.getOpcode() == M68kISD::CMP || + // (Cmp.getOpcode() == M68kISD::SUB && !Cmp->hasAnyUseOfValue(0)))) + // return SDValue(); + // + // // Quit if not used as a boolean value. + // if (CC != M68k::COND_EQ && CC != M68k::COND_NE) + // return SDValue(); + // + // // Check CMP operands. One of them should be 0 or 1 and the other should be + // // an SetCC or extended from it. 
+ // SDValue Op1 = Cmp.getOperand(0); + // SDValue Op2 = Cmp.getOperand(1); + // + // SDValue SetCC; + // const ConstantSDNode *C = nullptr; + // bool needOppositeCond = (CC == M68k::COND_EQ); + // bool checkAgainstTrue = false; // Is it a comparison against 1? + // + // if ((C = dyn_cast(Op1))) + // SetCC = Op2; + // else if ((C = dyn_cast(Op2))) + // SetCC = Op1; + // else // Quit if all operands are not constants. + // return SDValue(); + // + // if (C->getZExtValue() == 1) { + // needOppositeCond = !needOppositeCond; + // checkAgainstTrue = true; + // } else if (C->getZExtValue() != 0) + // // Quit if the constant is neither 0 or 1. + // return SDValue(); + // + // bool truncatedToBoolWithAnd = false; + // // Skip (zext $x), (trunc $x), or (and $x, 1) node. + // while (SetCC.getOpcode() == ISD::ZERO_EXTEND || + // SetCC.getOpcode() == ISD::TRUNCATE || SetCC.getOpcode() == ISD::AND) + // { + // if (SetCC.getOpcode() == ISD::AND) { + // int OpIdx = -1; + // if (isOneConstant(SetCC.getOperand(0))) + // OpIdx = 1; + // if (isOneConstant(SetCC.getOperand(1))) + // OpIdx = 0; + // if (OpIdx < 0) + // break; + // SetCC = SetCC.getOperand(OpIdx); + // truncatedToBoolWithAnd = true; + // } else + // SetCC = SetCC.getOperand(0); + // } + // + // switch (SetCC.getOpcode()) { + // case M68kISD::SETCC_CARRY: + // // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe + // to + // // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or + // 1, + // // i.e. it's a comparison against true but the result of SETCC_CARRY is + // not + // // truncated to i1 using 'and'. + // if (checkAgainstTrue && !truncatedToBoolWithAnd) + // break; + // assert(M68k::CondCode(SetCC.getConstantOperandVal(0)) == + // M68k::COND_CS && + // "Invalid use of SETCC_CARRY!"); + // LLVM_FALLTHROUGH; + // case M68kISD::SETCC: + // // Set the condition code or opposite one if necessary. 
+ // CC = M68k::CondCode(SetCC.getConstantOperandVal(0)); + // if (needOppositeCond) + // CC = M68k::GetOppositeBranchCondition(CC); + // return SetCC.getOperand(1); + // case M68kISD::CMOV: { + // // Check whether false/true value has canonical one, i.e. 0 or 1. + // ConstantSDNode *FVal = dyn_cast(SetCC.getOperand(0)); + // ConstantSDNode *TVal = dyn_cast(SetCC.getOperand(1)); + // // Quit if true value is not a constant. + // if (!TVal) + // return SDValue(); + // // Quit if false value is not a constant. + // if (!FVal) { + // SDValue Op = SetCC.getOperand(0); + // // Skip 'zext' or 'trunc' node. + // if (Op.getOpcode() == ISD::ZERO_EXTEND || Op.getOpcode() == + // ISD::TRUNCATE) + // Op = Op.getOperand(0); + // // A special case for rdrand/rdseed, where 0 is set if false cond is + // // found. + // if ((Op.getOpcode() != M68kISD::RDRAND && + // Op.getOpcode() != M68kISD::RDSEED) || + // Op.getResNo() != 0) + // return SDValue(); + // } + // // Quit if false value is not the constant 0 or 1. + // bool FValIsFalse = true; + // if (FVal && FVal->getZExtValue() != 0) { + // if (FVal->getZExtValue() != 1) + // return SDValue(); + // // If FVal is 1, opposite cond is needed. + // needOppositeCond = !needOppositeCond; + // FValIsFalse = false; + // } + // // Quit if TVal is not the constant opposite of FVal. + // if (FValIsFalse && TVal->getZExtValue() != 1) + // return SDValue(); + // if (!FValIsFalse && TVal->getZExtValue() != 0) + // return SDValue(); + // CC = M68k::CondCode(SetCC.getConstantOperandVal(2)); + // if (needOppositeCond) + // CC = M68k::GetOppositeBranchCondition(CC); + // return SetCC.getOperand(3); + // } + // } + + return SDValue(); +} + +/// Optimize a CCR definition used according to the condition code \p CC into +/// a simpler CCR value, potentially returning a new \p CC and replacing uses +/// of chain values. 
static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC,
                               SelectionDAG &DAG,
                               const M68kSubtarget &Subtarget) {
  // A carry that was materialized as `add X, -1` during carry legalization
  // can be consumed directly when only the carry flag (COND_CS) is tested.
  if (CC == M68k::COND_CS)
    if (SDValue Flags = combineCarryThroughADD(CCR))
      return Flags;

  // Otherwise try to fold a boolean test of a SETCC result back into a
  // direct use of the original CCR producer (CC may be updated in place).
  if (SDValue R = checkBoolTestSetCCCombine(CCR, CC))
    return R;
  return SDValue();
}

// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
// by simplifying the CCR operand and re-emitting the SETCC with the
// (possibly flipped) condition code.
static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG,
                                const M68kSubtarget &Subtarget) {
  SDLoc DL(N);
  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
  SDValue CCR = N->getOperand(1);

  // Try to simplify the CCR and condition code operands.
  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}

// Optimize M68kISD::BRCOND CHAIN, DEST, CONDCODE, CCR by simplifying the CCR
// operand; on success the branch is rebuilt with the updated condition.
static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG,
                                 const M68kSubtarget &Subtarget) {
  SDLoc DL(N);
  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
  SDValue CCR = N->getOperand(3);

  // Try to simplify the CCR and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCCCR can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
    SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
    return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}

// Optimize RES, CCR = M68kISD::SUBX LHS, RHS, CCR: if the incoming carry came
// from `add X, -1` over a real setcc carry, feed the carry in directly.
static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG) {
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
                       N->getOperand(1), Flags);
  }

  return SDValue();
}

/// Returns true if Elt is a constant zero or a floating point constant +0.0.
// static bool isZeroNode(SDValue Elt) {
//   return isNullConstant(Elt) || isNullFPConstant(Elt);
// }

// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
static SDValue combineADDX(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI) {
  // FIXME #19 Read through, make sure it fits m68k
  // // If the LHS and RHS of the ADDX node are zero, then it can't overflow and
  // // the result is either zero or one (depending on the input carry bit).
  // // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
  // if (isZeroNode(N->getOperand(0)) && isZeroNode(N->getOperand(1)) &&
  //     // We don't have a good way to replace an CCR use, so only do this when
  //     // dead right now.
  //     SDValue(N, 1).use_empty()) {
  //   SDLoc DL(N);
  //   EVT VT = N->getValueType(0);
  //   SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
  //   SDValue Res1 =
  //       DAG.getNode(ISD::AND, DL, VT,
  //                   DAG.getNode(M68kISD::SETCC_CARRY, DL, VT,
  //                               DAG.getConstant(M68k::COND_CS, DL,
  //                               MVT::i8), N->getOperand(2)),
  //                   DAG.getConstant(1, DL, VT));
  //   return DCI.CombineTo(N, Res1, CarryOut);
  // }

  // If the incoming carry was materialized via `add X, -1` over a real setcc
  // carry, use that carry directly (same trick as combineSUBX above).
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
                       N->getOperand(1), Flags);
  }

  return SDValue();
}

/// Target hook called by the generic DAGCombiner: dispatches the
/// M68k-specific combines above by target node opcode. Returns an empty
/// SDValue when no simplification applies.
SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  case M68kISD::SUBX:
    return combineSUBX(N, DAG);
  case M68kISD::ADDX:
    return combineADDX(N, DAG, DCI);
  case M68kISD::SETCC:
    return combineM68kSetCC(N, DAG, Subtarget);
  case M68kISD::BRCOND:
    return combineM68kBrCond(N, DAG, Subtarget);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// M68kISD Node Names
+//===----------------------------------------------------------------------===// +const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const { + switch (Opcode) { + case M68kISD::CALL: + return "M68kISD::CALL"; + case M68kISD::TAIL_CALL: + return "M68kISD::TAIL_CALL"; + case M68kISD::RET: + return "M68kISD::RET"; + case M68kISD::TC_RETURN: + return "M68kISD::TC_RETURN"; + case M68kISD::ADD: + return "M68kISD::ADD"; + case M68kISD::SUB: + return "M68kISD::SUB"; + case M68kISD::ADDX: + return "M68kISD::ADDX"; + case M68kISD::SUBX: + return "M68kISD::SUBX"; + case M68kISD::SMUL: + return "M68kISD::SMUL"; + case M68kISD::UMUL: + return "M68kISD::UMUL"; + case M68kISD::OR: + return "M68kISD::OR"; + case M68kISD::XOR: + return "M68kISD::XOR"; + case M68kISD::AND: + return "M68kISD::AND"; + case M68kISD::CMP: + return "M68kISD::CMP"; + case M68kISD::BT: + return "M68kISD::BT"; + case M68kISD::SELECT: + return "M68kISD::SELECT"; + case M68kISD::CMOV: + return "M68kISD::CMOV"; + case M68kISD::BRCOND: + return "M68kISD::BRCOND"; + case M68kISD::SETCC: + return "M68kISD::SETCC"; + case M68kISD::SETCC_CARRY: + return "M68kISD::SETCC_CARRY"; + case M68kISD::GLOBAL_BASE_REG: + return "M68kISD::GLOBAL_BASE_REG"; + case M68kISD::Wrapper: + return "M68kISD::Wrapper"; + case M68kISD::WrapperPC: + return "M68kISD::WrapperPC"; + case M68kISD::SEG_ALLOCA: + return "M68kISD::SEG_ALLOCA"; + default: + return NULL; + } +} Index: llvm/lib/Target/M68k/M68kInstrBuilder.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kInstrBuilder.h @@ -0,0 +1,94 @@ +//===-- M68kInstrBuilder.h - Functions to build M68k insts --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file exposes functions that may be used with BuildMI from the
+/// MachineInstrBuilder.h file to handle M68k'isms in a clean way.
+///
+/// TODO The BuildMem function may be used with the BuildMI function to add
+/// entire memory references in a single, typed, function call. M68k memory
+/// references can be very complex expressions (described in the README), so
+/// wrapping them up behind an easier to use interface makes sense.
+/// Descriptions of the functions are included below.
+///
+/// For reference, the order of operands for memory references is:
+/// (Operand), Base, Scale, Index, Displacement.
+///
+//===----------------------------------------------------------------------===//
+//
+#ifndef LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H
+#define LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/MC/MCInstrDesc.h"
+
+#include <cassert>
+
+namespace llvm {
+namespace M68k {
+static inline const MachineInstrBuilder &
+addOffset(const MachineInstrBuilder &MIB, int Offset) {
+  return MIB.addImm(Offset);
+}
+
+/// addRegIndirectWithDisp - This function is used to add a memory reference
+/// of the form (Offset, Base), i.e., one with no scale or index, but with a
+/// displacement. An example is: (4,D0).
+static inline const MachineInstrBuilder &
+addRegIndirectWithDisp(const MachineInstrBuilder &MIB, unsigned Reg,
+                       bool isKill, int Offset) {
+  return MIB.addImm(Offset).addReg(Reg, getKillRegState(isKill));
+}
+
+/// addFrameReference - This function is used to add a reference to the base of
+/// an abstract object on the stack frame of the current function. This
+/// reference has base register as the FrameIndex offset until it is resolved.
+/// This allows a constant offset to be specified as well...
+static inline const MachineInstrBuilder &
+addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
+  MachineInstr *MI = MIB;
+  MachineFunction &MF = *MI->getParent()->getParent();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const MCInstrDesc &MCID = MI->getDesc();
+  auto Flags = MachineMemOperand::MONone;
+  if (MCID.mayLoad())
+    Flags |= MachineMemOperand::MOLoad;
+  if (MCID.mayStore())
+    Flags |= MachineMemOperand::MOStore;
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FI, Offset), Flags,
+      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
+  return MIB.addImm(Offset).addFrameIndex(FI).addMemOperand(MMO);
+}
+
+static inline const MachineInstrBuilder &
+addMemOperand(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
+  MachineInstr *MI = MIB;
+  MachineFunction &MF = *MI->getParent()->getParent();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const MCInstrDesc &MCID = MI->getDesc();
+  auto Flags = MachineMemOperand::MONone;
+  if (MCID.mayLoad())
+    Flags |= MachineMemOperand::MOLoad;
+  if (MCID.mayStore())
+    Flags |= MachineMemOperand::MOStore;
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FI, Offset), Flags,
+      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
+  return MIB.addMemOperand(MMO);
+}
+} // end namespace M68k
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H
Index: llvm/lib/Target/M68k/M68kInstrInfo.h
=================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kInstrInfo.h @@ -0,0 +1,342 @@ +//===-- M68kInstrInfo.h - M68k Instruction Information ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the M68k implementation of the TargetInstrInfo class. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KINSTRINFO_H +#define LLVM_LIB_TARGET_M68K_M68KINSTRINFO_H + +#include "M68k.h" +#include "M68kRegisterInfo.h" + +#include "MCTargetDesc/M68kBaseInfo.h" + +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/TargetInstrInfo.h" + +#define GET_INSTRINFO_HEADER +#include "M68kGenInstrInfo.inc" + +namespace llvm { + +class M68kSubtarget; + +namespace M68k { +// Forward declaration +const uint8_t *getMCInstrBeads(unsigned); + +// These MUST be kept in sync with codes definitions in M68kInstrInfo.td +enum CondCode { + COND_T = 0, // True + COND_F = 1, // False + COND_HI = 2, // High + COND_LS = 3, // Less or Same + COND_CC = 4, // Carry Clear + COND_CS = 5, // Carry Set + COND_NE = 6, // Not Equal + COND_EQ = 7, // Equal + COND_VC = 8, // Overflow Clear + COND_VS = 9, // Overflow Set + COND_PL = 10, // Plus + COND_MI = 11, // Minus + COND_GE = 12, // Greater or Equal + COND_LT = 13, // Less Than + COND_GT = 14, // Greater Than + COND_LE = 15, // Less or Equal + LAST_VALID_COND = COND_LE, + COND_INVALID +}; + +// FIXME #25 would be nice tablegen to generate these predicates and converters +// mb tag based + +static inline M68k::CondCode GetOppositeBranchCondition(M68k::CondCode CC) { + switch (CC) { + default: + llvm_unreachable("Illegal 
condition code!"); + case M68k::COND_T: + return M68k::COND_F; + case M68k::COND_F: + return M68k::COND_T; + case M68k::COND_HI: + return M68k::COND_LS; + case M68k::COND_LS: + return M68k::COND_HI; + case M68k::COND_CC: + return M68k::COND_CS; + case M68k::COND_CS: + return M68k::COND_CC; + case M68k::COND_NE: + return M68k::COND_EQ; + case M68k::COND_EQ: + return M68k::COND_NE; + case M68k::COND_VC: + return M68k::COND_VS; + case M68k::COND_VS: + return M68k::COND_VC; + case M68k::COND_PL: + return M68k::COND_MI; + case M68k::COND_MI: + return M68k::COND_PL; + case M68k::COND_GE: + return M68k::COND_LT; + case M68k::COND_LT: + return M68k::COND_GE; + case M68k::COND_GT: + return M68k::COND_LE; + case M68k::COND_LE: + return M68k::COND_GT; + } +} + +static inline unsigned GetCondBranchFromCond(M68k::CondCode CC) { + switch (CC) { + default: + llvm_unreachable("Illegal condition code!"); + case M68k::COND_EQ: + return M68k::Beq8; + case M68k::COND_NE: + return M68k::Bne8; + case M68k::COND_LT: + return M68k::Blt8; + case M68k::COND_LE: + return M68k::Ble8; + case M68k::COND_GT: + return M68k::Bgt8; + case M68k::COND_GE: + return M68k::Bge8; + case M68k::COND_CS: + return M68k::Bcs8; + case M68k::COND_LS: + return M68k::Bls8; + case M68k::COND_HI: + return M68k::Bhi8; + case M68k::COND_CC: + return M68k::Bcc8; + case M68k::COND_MI: + return M68k::Bmi8; + case M68k::COND_PL: + return M68k::Bpl8; + case M68k::COND_VS: + return M68k::Bvs8; + case M68k::COND_VC: + return M68k::Bvc8; + } +} + +static inline M68k::CondCode GetCondFromBranchOpc(unsigned Opcode) { + switch (Opcode) { + default: + return M68k::COND_INVALID; + case M68k::Beq8: + return M68k::COND_EQ; + case M68k::Bne8: + return M68k::COND_NE; + case M68k::Blt8: + return M68k::COND_LT; + case M68k::Ble8: + return M68k::COND_LE; + case M68k::Bgt8: + return M68k::COND_GT; + case M68k::Bge8: + return M68k::COND_GE; + case M68k::Bcs8: + return M68k::COND_CS; + case M68k::Bls8: + return M68k::COND_LS; + case 
M68k::Bhi8: + return M68k::COND_HI; + case M68k::Bcc8: + return M68k::COND_CC; + case M68k::Bmi8: + return M68k::COND_MI; + case M68k::Bpl8: + return M68k::COND_PL; + case M68k::Bvs8: + return M68k::COND_VS; + case M68k::Bvc8: + return M68k::COND_VC; + } +} + +static inline unsigned IsCMP(unsigned Op) { + switch (Op) { + default: + return false; + case M68k::CMP8dd: + case M68k::CMP8df: + case M68k::CMP8di: + case M68k::CMP8dj: + case M68k::CMP8dp: + case M68k::CMP16dd: + case M68k::CMP16df: + case M68k::CMP16di: + case M68k::CMP16dj: + case M68k::CMP16dp: + return true; + } +} + +static inline bool IsSETCC(unsigned SETCC) { + switch (SETCC) { + default: + return false; + case M68k::SETd8eq: + case M68k::SETd8ne: + case M68k::SETd8lt: + case M68k::SETd8ge: + case M68k::SETd8le: + case M68k::SETd8gt: + case M68k::SETd8cs: + case M68k::SETd8cc: + case M68k::SETd8ls: + case M68k::SETd8hi: + case M68k::SETd8pl: + case M68k::SETd8mi: + case M68k::SETd8vc: + case M68k::SETd8vs: + case M68k::SETj8eq: + case M68k::SETj8ne: + case M68k::SETj8lt: + case M68k::SETj8ge: + case M68k::SETj8le: + case M68k::SETj8gt: + case M68k::SETj8cs: + case M68k::SETj8cc: + case M68k::SETj8ls: + case M68k::SETj8hi: + case M68k::SETj8pl: + case M68k::SETj8mi: + case M68k::SETj8vc: + case M68k::SETj8vs: + case M68k::SETp8eq: + case M68k::SETp8ne: + case M68k::SETp8lt: + case M68k::SETp8ge: + case M68k::SETp8le: + case M68k::SETp8gt: + case M68k::SETp8cs: + case M68k::SETp8cc: + case M68k::SETp8ls: + case M68k::SETp8hi: + case M68k::SETp8pl: + case M68k::SETp8mi: + case M68k::SETp8vc: + case M68k::SETp8vs: + return true; + } +} + +} // namespace M68k + +class M68kInstrInfo : public M68kGenInstrInfo { + virtual void anchor(); + +protected: + const M68kSubtarget &Subtarget; + const M68kRegisterInfo RI; + +public: + explicit M68kInstrInfo(const M68kSubtarget &STI); + + static const M68kInstrInfo *create(M68kSubtarget &STI); + + /// TargetInstrInfo is a superset of MRegister info. 
As such, whenever a
+  /// client has an instance of instruction info, it should always be able to
+  /// get register info as well (through this method).
+  const M68kRegisterInfo &getRegisterInfo() const { return RI; }
+
+  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                     MachineBasicBlock *&FBB,
+                     SmallVectorImpl<MachineOperand> &Cond,
+                     bool AllowModify) const override;
+
+  bool AnalyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                         MachineBasicBlock *&FBB,
+                         SmallVectorImpl<MachineOperand> &Cond,
+                         bool AllowModify) const;
+
+  unsigned removeBranch(MachineBasicBlock &MBB,
+                        int *BytesRemoved = nullptr) const override;
+
+  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+                        const DebugLoc &DL,
+                        int *BytesAdded = nullptr) const override;
+
+  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
+                   bool KillSrc) const override;
+
+  bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
+                         unsigned &Size, unsigned &Offset,
+                         const MachineFunction &MF) const override;
+
+  void storeRegToStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, Register SrcReg,
+                           bool isKill, int FrameIndex,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI) const override;
+
+  void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI, Register DestReg,
+                            int FrameIndex, const TargetRegisterClass *RC,
+                            const TargetRegisterInfo *TRI) const override;
+
+  bool expandPostRAPseudo(MachineInstr &MI) const override;
+
+  bool isRegisterOperandPCRel(const MachineOperand &MO) const override;
+
+  /// Add appropriate SExt nodes
+  void AddSExt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+               DebugLoc DL, unsigned Reg, MVT From, MVT To) const;
+
+  /// Add appropriate ZExt nodes
+  void AddZExt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+               DebugLoc DL, unsigned Reg, MVT From, MVT To) const;
+
+  /// Move across register classes without extension
+  bool ExpandMOVX_RR(MachineInstrBuilder &MIB, MVT MVTDst, MVT MVTSrc) const;
+
+  /// Move from register and extend
+  bool ExpandMOVSZX_RR(MachineInstrBuilder &MIB, bool isSigned, MVT MVTDst,
+                       MVT MVTSrc) const;
+
+  /// Move from memory and extend
+  bool ExpandMOVSZX_RM(MachineInstrBuilder &MIB, bool isSigned,
+                       const MCInstrDesc &Desc, MVT MVTDst, MVT MVTSrc) const;
+
+  /// Push/Pop to/from stack
+  bool ExpandPUSH_POP(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
+                      bool isPush) const;
+
+  /// Moves to/from CCR
+  bool ExpandCCR(MachineInstrBuilder &MIB, bool isToCCR) const;
+
+  /// Expand all MOVEM pseudos into real MOVEMs
+  bool ExpandMOVEM(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
+                   bool isRM) const;
+
+  /// Return a virtual register initialized with the global base register
+  /// value. Output instructions required to initialize the register in the
+  /// function entry block, if necessary.
+  unsigned getGlobalBaseReg(MachineFunction *MF) const;
+
+  std::pair<unsigned, unsigned>
+  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
+  ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableDirectMachineOperandTargetFlags() const override;
+};
+
+} // namespace llvm
+
+#endif
Index: llvm/lib/Target/M68k/M68kInstrInfo.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -0,0 +1,885 @@
+//===-- M68kInstrInfo.cpp - M68k Instruction Information ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k implementation of the TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kInstrInfo.h"
+
+#include "M68kInstrBuilder.h"
+#include "M68kMachineFunction.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#include <functional>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "M68k-instr-info"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "M68kGenInstrInfo.inc"
+
+// Pin the vtable to this file.
+void M68kInstrInfo::anchor() {}
+
+M68kInstrInfo::M68kInstrInfo(const M68kSubtarget &STI)
+    : M68kGenInstrInfo(M68k::ADJCALLSTACKDOWN, M68k::ADJCALLSTACKUP, 0,
+                       M68k::RET),
+      Subtarget(STI), RI(STI) {}
+
+static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc) {
+  switch (BrOpc) {
+  default:
+    return M68k::COND_INVALID;
+  case M68k::Beq8:
+    return M68k::COND_EQ;
+  case M68k::Bne8:
+    return M68k::COND_NE;
+  case M68k::Blt8:
+    return M68k::COND_LT;
+  case M68k::Ble8:
+    return M68k::COND_LE;
+  case M68k::Bgt8:
+    return M68k::COND_GT;
+  case M68k::Bge8:
+    return M68k::COND_GE;
+  case M68k::Bcs8:
+    return M68k::COND_CS;
+  case M68k::Bls8:
+    return M68k::COND_LS;
+  case M68k::Bhi8:
+    return M68k::COND_HI;
+  case M68k::Bcc8:
+    return M68k::COND_CC;
+  case M68k::Bmi8:
+    return M68k::COND_MI;
+  case M68k::Bpl8:
+    return M68k::COND_PL;
+  case M68k::Bvs8:
+    return M68k::COND_VS;
+  case M68k::Bvc8:
+    return M68k::COND_VC;
+  }
+}
+
+bool M68kInstrInfo::AnalyzeBranchImpl(MachineBasicBlock &MBB,
+                                      MachineBasicBlock *&TBB,
+                                      MachineBasicBlock *&FBB,
+                                      SmallVectorImpl<MachineOperand> &Cond,
+                                      bool AllowModify) const {
+
+  auto UncondBranch =
+      std::pair<MachineBasicBlock::reverse_iterator, MachineBasicBlock *>{
+          MBB.rend(), nullptr};
+
+  // Erase any instructions if allowed at the end of the scope.
+  std::vector<std::reference_wrapper<MachineInstr>> EraseList;
+  auto FinalizeOnReturn = llvm::make_scope_exit([&EraseList] {
+    std::for_each(EraseList.begin(), EraseList.end(),
+                  [](decltype(EraseList)::value_type &ref) {
+                    ref.get().eraseFromParent();
+                  });
+  });
+
+  // Start from the bottom of the block and work up, examining the
+  // terminator instructions.
+  for (auto iter = MBB.rbegin(); iter != MBB.rend(); iter = std::next(iter)) {
+
+    auto Opcode = iter->getOpcode();
+
+    if (iter->isDebugInstr()) {
+      continue;
+    }
+
+    // Working from the bottom, when we see a non-terminator instruction, we're
+    // done.
+    if (!isUnpredicatedTerminator(*iter)) {
+      break;
+    }
+
+    // A terminator that isn't a branch can't easily be handled by this
+    // analysis.
+    if (!iter->isBranch()) {
+      return true;
+    }
+
+    // Handle unconditional branches.
+    if (Opcode == M68k::BRA8 || Opcode == M68k::BRA16) {
+      if (!iter->getOperand(0).isMBB())
+        return true;
+      UncondBranch = {iter, iter->getOperand(0).getMBB()};
+
+      // TBB is used to indicate the unconditional destination.
+      TBB = UncondBranch.second;
+
+      if (!AllowModify) {
+        continue;
+      }
+
+      // If the block has any instructions after a JMP, erase them.
+      EraseList.insert(EraseList.begin(), MBB.rbegin(), iter);
+
+      Cond.clear();
+      FBB = nullptr;
+
+      // Erase the JMP if it's equivalent to a fall-through.
+      if (MBB.isLayoutSuccessor(UncondBranch.second)) {
+        TBB = nullptr;
+        EraseList.push_back(*iter);
+        UncondBranch = {MBB.rend(), nullptr};
+      }
+
+      continue;
+    }
+
+    // Handle conditional branches.
+    auto BranchCode = M68k::GetCondFromBranchOpc(Opcode);
+
+    // Can't handle indirect branch.
+    if (BranchCode == M68k::COND_INVALID) {
+      return true;
+    }
+
+    // In practice we should never have an undef CCR operand, if we do
+    // abort here as we are not prepared to preserve the flag.
+    // ??? Is this required?
+    // if (iter->getOperand(1).isUndef())
+    //   return true;
+
+    // Working from the bottom, handle the first conditional branch.
+ if (Cond.empty()) { + if (!iter->getOperand(0).isMBB()) + return true; + MachineBasicBlock *CondBranchTarget = iter->getOperand(0).getMBB(); + + // If we see something like this: + // + // bcc l1 + // bra l2 + // ... + // l1: + // ... + // l2: + if (UncondBranch.first != MBB.rend()) { + + assert(std::next(UncondBranch.first) == iter && "Wrong block layout."); + + // And we are allowed to modify the block and the target block of the + // conditional branch is the direct successor of this block: + // + // bcc l1 + // bra l2 + // l1: + // ... + // l2: + // + // we change it to this if allowed: + // + // bncc l2 + // l1: + // ... + // l2: + // + // Which is a bit more efficient. + if (AllowModify && MBB.isLayoutSuccessor(CondBranchTarget)) { + + BranchCode = GetOppositeBranchCondition(BranchCode); + unsigned BNCC = GetCondBranchFromCond(BranchCode); + + BuildMI(MBB, *UncondBranch.first, MBB.rfindDebugLoc(iter), get(BNCC)) + .addMBB(UncondBranch.second); + + EraseList.push_back(*iter); + EraseList.push_back(*UncondBranch.first); + + TBB = UncondBranch.second; + FBB = nullptr; + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + + // Otherwise preserve TBB, FBB and Cond as requested + } else { + TBB = CondBranchTarget; + FBB = UncondBranch.second; + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + } + + UncondBranch = {MBB.rend(), nullptr}; + continue; + } + + TBB = CondBranchTarget; + FBB = nullptr; + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + + continue; + } + + // Handle subsequent conditional branches. Only handle the case where all + // conditional branches branch to the same destination and their condition + // opcodes fit one of the special multi-branch idioms. + assert(Cond.size() == 1); + assert(TBB); + + // If the conditions are the same, we can leave them alone. 
+ auto OldBranchCode = static_cast(Cond[0].getImm()); + if (!iter->getOperand(0).isMBB()) + return true; + auto NewTBB = iter->getOperand(0).getMBB(); + if (OldBranchCode == BranchCode && TBB == NewTBB) { + continue; + } + + // If they differ we cannot do much here. + return true; + } + + return false; +} + +bool M68kInstrInfo::analyzeBranch(MachineBasicBlock &MBB, + MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify) const { + return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, AllowModify); +} + +unsigned M68kInstrInfo::removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved) const { + assert(!BytesRemoved && "code size not handled"); + + MachineBasicBlock::iterator I = MBB.end(); + unsigned Count = 0; + + while (I != MBB.begin()) { + --I; + if (I->isDebugValue()) + continue; + if (I->getOpcode() != M68k::BRA8 && + getCondFromBranchOpc(I->getOpcode()) == M68k::COND_INVALID) + break; + // Remove the branch. + I->eraseFromParent(); + I = MBB.end(); + ++Count; + } + + return Count; +} + +unsigned M68kInstrInfo::insertBranch( + MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, + ArrayRef Cond, const DebugLoc &DL, int *BytesAdded) const { + // Shouldn't be a fall through. + assert(TBB && "InsertBranch must not be told to insert a fallthrough"); + assert((Cond.size() == 1 || Cond.size() == 0) && + "M68k branch conditions have one component!"); + assert(!BytesAdded && "code size not handled"); + + if (Cond.empty()) { + // Unconditional branch? + assert(!FBB && "Unconditional branch with multiple successors!"); + BuildMI(&MBB, DL, get(M68k::BRA8)).addMBB(TBB); + return 1; + } + + // If FBB is null, it is implied to be a fall-through block. + bool FallThru = FBB == nullptr; + + // Conditional branch. 
+ unsigned Count = 0; + M68k::CondCode CC = (M68k::CondCode)Cond[0].getImm(); + unsigned Opc = GetCondBranchFromCond(CC); + BuildMI(&MBB, DL, get(Opc)).addMBB(TBB); + ++Count; + if (!FallThru) { + // Two-way Conditional branch. Insert the second branch. + BuildMI(&MBB, DL, get(M68k::BRA8)).addMBB(FBB); + ++Count; + } + return Count; +} + +void M68kInstrInfo::AddSExt(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, DebugLoc DL, + unsigned Reg, MVT From, MVT To) const { + if (From == MVT::i8) { + unsigned R = Reg; + // EXT16 requires i16 register + if (To == MVT::i32) { + R = RI.getSubReg(Reg, M68k::MxSubRegIndex16Lo); + assert(R && "No viable SUB register available"); + } + BuildMI(MBB, I, DL, get(M68k::EXT16), R).addReg(R); + } + + if (To == MVT::i32) { + BuildMI(MBB, I, DL, get(M68k::EXT32), Reg).addReg(Reg); + } +} + +void M68kInstrInfo::AddZExt(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, DebugLoc DL, + unsigned Reg, MVT From, MVT To) const { + + unsigned Mask, And; + if (From == MVT::i8) { + Mask = 0xFF; + } else { + Mask = 0xFFFF; + } + + if (To == MVT::i16) { + And = M68k::AND16di; + } else { // i32 + And = M68k::AND32di; + } + + // TODO #46 use xor r,r to decrease size + BuildMI(MBB, I, DL, get(And), Reg).addReg(Reg).addImm(Mask); +} + +bool M68kInstrInfo::ExpandMOVX_RR(MachineInstrBuilder &MIB, MVT MVTDst, + MVT MVTSrc) const { + unsigned Move = MVTDst == MVT::i16 ? 
M68k::MOV16rr : M68k::MOV32rr; + unsigned Dst = MIB->getOperand(0).getReg(); + unsigned Src = MIB->getOperand(1).getReg(); + + assert(Dst != Src && "You cannot use the same Regs with MOVX_RR"); + + const auto &TRI = getRegisterInfo(); + + const auto *RCDst = TRI.getMaximalPhysRegClass(Dst, MVTDst); + const auto *RCSrc = TRI.getMaximalPhysRegClass(Src, MVTSrc); + + assert(RCDst && RCSrc && "Wrong use of MOVX_RR"); + assert(RCDst != RCSrc && "You cannot use the same Reg Classes with MOVX_RR"); + + // We need to find the super source register that matches the size of Dst + unsigned SSrc = RI.getMatchingMegaReg(Src, RCDst); + assert(SSrc && "No viable MEGA register available"); + + DebugLoc DL = MIB->getDebugLoc(); + + // If it happens to that super source register is the destination register + // we do nothing + if (Dst == SSrc) { + LLVM_DEBUG(dbgs() << "Remove " << *MIB.getInstr() << '\n'); + MIB->eraseFromParent(); + } else { // otherwise we need to MOV + LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to MOV\n"); + MIB->setDesc(get(Move)); + MIB->getOperand(1).setReg(SSrc); + } + + return true; +} + +/// Expand SExt MOVE pseudos into a MOV and a EXT if the operands are two +/// different registers or just EXT if it is the same register +bool M68kInstrInfo::ExpandMOVSZX_RR(MachineInstrBuilder &MIB, bool isSigned, + MVT MVTDst, MVT MVTSrc) const { + LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to "); + + unsigned Move; + + if (MVTDst == MVT::i16) { + Move = M68k::MOV16rr; + } else { // i32 + Move = M68k::MOV32rr; + } + + unsigned Dst = MIB->getOperand(0).getReg(); + unsigned Src = MIB->getOperand(1).getReg(); + + assert(Dst != Src && "You cannot use the same Regs with MOVSX_RR"); + + const auto &TRI = getRegisterInfo(); + + const auto *RCDst = TRI.getMaximalPhysRegClass(Dst, MVTDst); + const auto *RCSrc = TRI.getMaximalPhysRegClass(Src, MVTSrc); + + assert(RCDst && RCSrc && "Wrong use of MOVSX_RR"); + assert(RCDst != RCSrc && "You cannot use the same 
Reg Classes with MOVSX_RR"); + + // We need to find the super source register that matches the size of Dst + unsigned SSrc = RI.getMatchingMegaReg(Src, RCDst); + assert(SSrc && "No viable MEGA register available"); + + MachineBasicBlock &MBB = *MIB->getParent(); + DebugLoc DL = MIB->getDebugLoc(); + + if (Dst != SSrc) { + LLVM_DEBUG(dbgs() << "Move and " << '\n'); + BuildMI(MBB, MIB.getInstr(), DL, get(Move), Dst).addReg(SSrc); + } + + if (isSigned) { + LLVM_DEBUG(dbgs() << "Sign Extend" << '\n'); + AddSExt(MBB, MIB.getInstr(), DL, Dst, MVTSrc, MVTDst); + } else { + LLVM_DEBUG(dbgs() << "Zero Extend" << '\n'); + AddZExt(MBB, MIB.getInstr(), DL, Dst, MVTSrc, MVTDst); + } + + MIB->eraseFromParent(); + + return true; +} + +bool M68kInstrInfo::ExpandMOVSZX_RM(MachineInstrBuilder &MIB, bool isSigned, + const MCInstrDesc &Desc, MVT MVTDst, + MVT MVTSrc) const { + LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to LOAD and "); + + unsigned Dst = MIB->getOperand(0).getReg(); + + // We need the subreg of Dst to make instruction verifier happy because the + // real machine instruction consumes and produces values of the same size and + // the registers the will be used here fall into different classes and this + // makes IV cry. We could of course use bigger operation but this will put + // some pressure on cache and memory so no. + unsigned SubDst = + RI.getSubReg(Dst, MVTSrc == MVT::i8 ? 
M68k::MxSubRegIndex8Lo + : M68k::MxSubRegIndex16Lo); + assert(SubDst && "No viable SUB register available"); + + // Make this a plain move + MIB->setDesc(Desc); + MIB->getOperand(0).setReg(SubDst); + + MachineBasicBlock::iterator I = MIB.getInstr(); + I++; + MachineBasicBlock &MBB = *MIB->getParent(); + DebugLoc DL = MIB->getDebugLoc(); + + if (isSigned) { + LLVM_DEBUG(dbgs() << "Sign Extend" << '\n'); + AddSExt(MBB, I, DL, Dst, MVTSrc, MVTDst); + } else { + LLVM_DEBUG(dbgs() << "Zero Extend" << '\n'); + AddZExt(MBB, I, DL, Dst, MVTSrc, MVTDst); + } + + return true; +} + +bool M68kInstrInfo::ExpandPUSH_POP(MachineInstrBuilder &MIB, + const MCInstrDesc &Desc, bool isPush) const { + MachineBasicBlock::iterator I = MIB.getInstr(); + I++; + MachineBasicBlock &MBB = *MIB->getParent(); + MachineOperand MO = MIB->getOperand(0); + DebugLoc DL = MIB->getDebugLoc(); + if (isPush) { + BuildMI(MBB, I, DL, Desc).addReg(RI.getStackRegister()).add(MO); + } else { + BuildMI(MBB, I, DL, Desc, MO.getReg()).addReg(RI.getStackRegister()); + } + MIB->eraseFromParent(); + return true; +} + +bool M68kInstrInfo::ExpandCCR(MachineInstrBuilder &MIB, bool isToCCR) const { + + // Replace the pseudo instruction with the real one + if (isToCCR) { + MIB->setDesc(get(M68k::MOV16cd)); + } else { + // FIXME #24 M68010 or better is required + MIB->setDesc(get(M68k::MOV16dc)); + } + + // Promote used register to the next class + auto &Opd = MIB->getOperand(1); + Opd.setReg(getRegisterInfo().getMatchingSuperReg( + Opd.getReg(), M68k::MxSubRegIndex8Lo, &M68k::DR16RegClass)); + + return true; +} + +bool M68kInstrInfo::ExpandMOVEM(MachineInstrBuilder &MIB, + const MCInstrDesc &Desc, bool isRM) const { + int Reg = 0, Offset = 0, Base = 0; + auto XR32 = RI.getRegClass(M68k::XR32RegClassID); + auto DL = MIB->getDebugLoc(); + auto MI = MIB.getInstr(); + auto &MBB = *MIB->getParent(); + + if (isRM) { + Reg = MIB->getOperand(0).getReg(); + Offset = MIB->getOperand(1).getImm(); + Base = 
MIB->getOperand(2).getReg(); + } else { + Offset = MIB->getOperand(0).getImm(); + Base = MIB->getOperand(1).getReg(); + Reg = MIB->getOperand(2).getReg(); + } + + // If the register is not in XR32 then it is smaller than 32 bit, we + // implicitly promote it to 32 + if (!XR32->contains(Reg)) { + Reg = RI.getMatchingMegaReg(Reg, XR32); + assert(Reg && "Has not meaningful MEGA register"); + } + + unsigned Mask = 1 << RI.getSpillRegisterOrder(Reg); + if (isRM) { + BuildMI(MBB, MI, DL, Desc) + .addImm(Mask) + .addImm(Offset) + .addReg(Base) + .addReg(Reg, RegState::ImplicitDefine) + .copyImplicitOps(*MIB); + } else { + BuildMI(MBB, MI, DL, Desc) + .addImm(Offset) + .addReg(Base) + .addImm(Mask) + .addReg(Reg, RegState::Implicit) + .copyImplicitOps(*MIB); + } + + MIB->eraseFromParent(); + + return true; +} + +/// Expand a single-def pseudo instruction to a two-addr +/// instruction with two undef reads of the register being defined. +/// This is used for mapping: +/// %d0 = SETCS_C32d +/// to: +/// %d0 = SUBX32dd %d0, %d0 +/// +static bool Expand2AddrUndef(MachineInstrBuilder &MIB, + const MCInstrDesc &Desc) { + assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); + unsigned Reg = MIB->getOperand(0).getReg(); + MIB->setDesc(Desc); + + // MachineInstr::addOperand() will insert explicit operands before any + // implicit operands. + MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); + // But we don't trust that. 
+ assert(MIB->getOperand(1).getReg() == Reg && + MIB->getOperand(2).getReg() == Reg && "Misplaced operand"); + return true; +} + +bool M68kInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { + MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); + switch (MI.getOpcode()) { + case M68k::PUSH8d: + return ExpandPUSH_POP(MIB, get(M68k::MOV8ed), true); + case M68k::PUSH16d: + return ExpandPUSH_POP(MIB, get(M68k::MOV16er), true); + case M68k::PUSH32r: + return ExpandPUSH_POP(MIB, get(M68k::MOV32er), true); + + case M68k::POP8d: + return ExpandPUSH_POP(MIB, get(M68k::MOV8do), false); + case M68k::POP16d: + return ExpandPUSH_POP(MIB, get(M68k::MOV16ro), false); + case M68k::POP32r: + return ExpandPUSH_POP(MIB, get(M68k::MOV32ro), false); + + case M68k::SETCS_C8d: + return Expand2AddrUndef(MIB, get(M68k::SUBX8dd)); + case M68k::SETCS_C16d: + return Expand2AddrUndef(MIB, get(M68k::SUBX16dd)); + case M68k::SETCS_C32d: + return Expand2AddrUndef(MIB, get(M68k::SUBX32dd)); + } + return false; +} + +bool M68kInstrInfo::isRegisterOperandPCRel(const MachineOperand &MO) const { + assert(MO.isReg()); + const auto *MI = MO.getParent(); + const uint8_t *Beads = M68k::getMCInstrBeads(MI->getOpcode()); + assert(*Beads); + + // Only addressing mode k has (non-pc) register with PCRel + // So we're looking for EA Beads equal to + // `3Bits<011>_1Bit<1>_2Bits<11>` + // FIXME: There is an important caveat and two assumptions + // here: The caveat is that EA encoding always sit on the LSB. + // Where the assumptions are that if there are more than one + // operands, the EA encoding for the source operand always sit + // on the LSB. At the same time, k addressing mode can not be used + // on destination operand. 
+ // The last assumption is kinda dirty so we need to find a way around + // it + const uint8_t EncEAk[3] = {0b011, 0b1, 0b11}; + for (const uint8_t Pat : EncEAk) { + uint8_t Bead = *(Beads++); + if (!Bead) + return false; + + switch (Bead & 0xF) { + default: + return false; + case M68kBeads::Bits1: + case M68kBeads::Bits2: + case M68kBeads::Bits3: { + uint8_t Val = (Bead & 0xF0) >> 4; + if (Val != Pat) + return false; + } + } + } + return true; +} + +void M68kInstrInfo::copyPhysReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const DebugLoc &DL, MCRegister DstReg, + MCRegister SrcReg, bool KillSrc) const { + unsigned Opc = 0; + + // First deal with the normal symmetric copies. + if (M68k::XR32RegClass.contains(DstReg, SrcReg)) + Opc = M68k::MOV32rr; + else if (M68k::XR16RegClass.contains(DstReg, SrcReg)) + Opc = M68k::MOV16rr; + else if (M68k::DR8RegClass.contains(DstReg, SrcReg)) { + Opc = M68k::MOV8dd; + } + + if (Opc) { + BuildMI(MBB, MI, DL, get(Opc), DstReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } + + // Now deal with asymmetrically sized copies. The cases that follow are upcast + // moves. + // + // NOTE + // These moves are not aware of type nature of these values and thus + // won't do any SExt or ZExt and upper bits will basically contain garbage. 
+ MachineInstrBuilder MIB(*MBB.getParent(), MI); + if (M68k::DR8RegClass.contains(SrcReg)) { + if (M68k::XR16RegClass.contains(DstReg)) { + Opc = M68k::MOVXd16d8; + } else if (M68k::XR32RegClass.contains(DstReg)) { + Opc = M68k::MOVXd32d8; + } + } else if (M68k::XR16RegClass.contains(SrcReg)) { + if (M68k::XR32RegClass.contains(DstReg)) { + Opc = M68k::MOVXd32d16; + } + } + + if (Opc) { + BuildMI(MBB, MI, DL, get(Opc), DstReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } + + bool FromCCR = SrcReg == M68k::CCR; + bool FromSR = SrcReg == M68k::SR; + bool ToCCR = DstReg == M68k::CCR; + bool ToSR = DstReg == M68k::SR; + + if (FromCCR) { + assert(M68k::DR8RegClass.contains(DstReg) && + "Need DR8 register to copy CCR"); + Opc = M68k::MOV8dc; + } else if (ToCCR) { + assert(M68k::DR8RegClass.contains(SrcReg) && + "Need DR8 register to copy CCR"); + Opc = M68k::MOV8cd; + } else if (FromSR || ToSR) { + llvm_unreachable("Cannot emit SR copy instruction"); + } + + if (Opc) { + BuildMI(MBB, MI, DL, get(Opc), DstReg) + .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } + + LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to " + << RI.getName(DstReg) << '\n'); + llvm_unreachable("Cannot emit physreg copy instruction"); +} + +namespace { +unsigned getLoadStoreRegOpcode(unsigned Reg, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + const M68kSubtarget &STI, bool load) { + switch (TRI->getRegSizeInBits(*RC)) { + default: + llvm_unreachable("Unknown spill size"); + case 8: + if (M68k::DR8RegClass.hasSubClassEq(RC)) { + return load ? M68k::MOVM8mp_P : M68k::MOVM8pm_P; + } else if (M68k::CCRCRegClass.hasSubClassEq(RC)) { + return load ? M68k::MOV16cp : M68k::MOV16pc; + } + llvm_unreachable("Unknown 1-byte regclass"); + case 16: + assert(M68k::XR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); + return load ? 
M68k::MOVM16mp_P : M68k::MOVM16pm_P; + case 32: + assert(M68k::XR32RegClass.hasSubClassEq(RC) && "Unknown 4-byte regclass"); + return load ? M68k::MOVM32mp_P : M68k::MOVM32pm_P; + } +} + +unsigned getStoreRegOpcode(unsigned SrcReg, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + const M68kSubtarget &STI) { + return getLoadStoreRegOpcode(SrcReg, RC, TRI, STI, false); +} + +unsigned getLoadRegOpcode(unsigned DstReg, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI, + const M68kSubtarget &STI) { + return getLoadStoreRegOpcode(DstReg, RC, TRI, STI, true); +} +} // end anonymous namespace + +bool M68kInstrInfo::getStackSlotRange(const TargetRegisterClass *RC, + unsigned SubIdx, unsigned &Size, + unsigned &Offset, + const MachineFunction &MF) const { + // The slot size must be the maximum size so we can easily use MOVEM.L + Size = 4; + Offset = 0; + return true; +} + +void M68kInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + Register SrcReg, bool isKill, + int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + const MachineFunction &MF = *MBB.getParent(); + assert(MF.getFrameInfo().getObjectSize(FrameIndex) == 4 && + "Stack slot too small for store"); + unsigned Opc = getStoreRegOpcode(SrcReg, RC, TRI, Subtarget); + DebugLoc DL = MBB.findDebugLoc(MI); + // (0,FrameIndex) <- $reg + M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIndex) + .addReg(SrcReg, getKillRegState(isKill)); +} + +void M68kInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + Register DstReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + const MachineFunction &MF = *MBB.getParent(); + assert(MF.getFrameInfo().getObjectSize(FrameIndex) == 4 && + "Stack slot too small for store"); + unsigned Opc = getLoadRegOpcode(DstReg, RC, TRI, Subtarget); + DebugLoc DL = MBB.findDebugLoc(MI); + 
M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DstReg), FrameIndex); +} + +/// Return a virtual register initialized with the global base register +/// value. Output instructions required to initialize the register in the +/// function entry block, if necessary. +/// +/// TODO #47 Eliminate this and move the code to M68kMachineFunctionInfo. +unsigned M68kInstrInfo::getGlobalBaseReg(MachineFunction *MF) const { + M68kMachineFunctionInfo *MxFI = MF->getInfo(); + unsigned GlobalBaseReg = MxFI->getGlobalBaseReg(); + if (GlobalBaseReg != 0) + return GlobalBaseReg; + + // Create the register. The code to initialize it is inserted later, + // by the CGBR pass (below). + // + // NOTE + // Normally M68k uses A5 register as global base pointer but this will + // create unnecessary spill if we use less than 4 registers in code; since A5 + // is callee-save anyway we could try to allocate caller-save first and if + // lucky get one, otherwise it does not really matter which callee-save to + // use. + MachineRegisterInfo &RegInfo = MF->getRegInfo(); + GlobalBaseReg = RegInfo.createVirtualRegister(&M68k::AR32_NOSPRegClass); + MxFI->setGlobalBaseReg(GlobalBaseReg); + return GlobalBaseReg; +} + +std::pair +M68kInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { + return std::make_pair(TF, 0u); +} + +ArrayRef> +M68kInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { + using namespace M68kII; + static const std::pair TargetFlags[] = { + {MO_ABSOLUTE_ADDRESS, "m68k-absolute"}, + {MO_PC_RELATIVE_ADDRESS, "m68k-pcrel"}, + {MO_GOT, "m68k-got"}, + {MO_GOTOFF, "m68k-gotoff"}, + {MO_GOTPCREL, "m68k-gotpcrel"}, + {MO_PLT, "m68k-plt"}}; + return makeArrayRef(TargetFlags); +} + +namespace { +/// Create Global Base Reg pass. 
This initializes the PIC global base register +struct CGBR : public MachineFunctionPass { + static char ID; + CGBR() : MachineFunctionPass(ID) {} + + bool runOnMachineFunction(MachineFunction &MF) override { + const M68kSubtarget &STI = MF.getSubtarget(); + M68kMachineFunctionInfo *MxFI = MF.getInfo(); + + unsigned GlobalBaseReg = MxFI->getGlobalBaseReg(); + + // If we didn't need a GlobalBaseReg, don't insert code. + if (GlobalBaseReg == 0) + return false; + + // Insert the set of GlobalBaseReg into the first MBB of the function + MachineBasicBlock &FirstMBB = MF.front(); + MachineBasicBlock::iterator MBBI = FirstMBB.begin(); + DebugLoc DL = FirstMBB.findDebugLoc(MBBI); + const M68kInstrInfo *TII = STI.getInstrInfo(); + + // Generate lea (_GLOBAL_OFFSET_TABLE_,%PC), %A5 + BuildMI(FirstMBB, MBBI, DL, TII->get(M68k::LEA32q), GlobalBaseReg) + .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", M68kII::MO_GOTPCREL); + + return true; + } + + StringRef getPassName() const override { + return "M68k PIC Global Base Reg Initialization"; + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + MachineFunctionPass::getAnalysisUsage(AU); + } +}; +} // namespace + +char CGBR::ID = 0; +FunctionPass *llvm::createM68kGlobalBaseRegPass() { return new CGBR(); } Index: llvm/lib/Target/M68k/M68kMCInstLower.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kMCInstLower.h @@ -0,0 +1,55 @@ +//===-- M68kMCInstLower.h - Lower MachineInstr to MCInst -----*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains code to lower M68k MachineInstrs to their +/// corresponding MCInst records. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KMCINSTLOWER_H +#define LLVM_LIB_TARGET_M68K_M68KMCINSTLOWER_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/Support/Compiler.h" + +namespace llvm { +class MCContext; +class MCInst; +class MCOperand; +class MachineInstr; +class MachineFunction; +class M68kAsmPrinter; + +/// This class is used to lower an MachineInstr into an MCInst. +class M68kMCInstLower { + typedef MachineOperand::MachineOperandType MachineOperandType; + MCContext &Ctx; + MachineFunction &MF; + const TargetMachine &TM; + const MCAsmInfo &MAI; + M68kAsmPrinter &AsmPrinter; + +public: + M68kMCInstLower(MachineFunction &MF, M68kAsmPrinter &AP); + + /// Lower an MO_GlobalAddress or MO_ExternalSymbol operand to an MCSymbol. + MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const; + + MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; + + Optional LowerOperand(const MachineInstr *MI, + const MachineOperand &MO) const; + + void Lower(const MachineInstr *MI, MCInst &OutMI) const; +}; +} // namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kMCInstLower.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kMCInstLower.cpp @@ -0,0 +1,179 @@ +//===-- M68kMCInstLower.cpp - M68k MachineInstr to MCInst ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains code to lower M68k MachineInstrs to their +/// corresponding MCInst records. 
+/// +//===----------------------------------------------------------------------===// + +#include "M68kMCInstLower.h" + +#include "M68kAsmPrinter.h" +#include "M68kInstrInfo.h" + +#include "MCTargetDesc/M68kBaseInfo.h" + +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" + +using namespace llvm; + +#define DEBUG_TYPE "m68k-mc-inst-lower" + +M68kMCInstLower::M68kMCInstLower(MachineFunction &MF, M68kAsmPrinter &AP) + : Ctx(MF.getContext()), MF(MF), TM(MF.getTarget()), MAI(*TM.getMCAsmInfo()), + AsmPrinter(AP) {} + +MCSymbol * +M68kMCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const { + assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && + "Isn't a symbol reference"); + + const auto &TT = TM.getTargetTriple(); + if (MO.isGlobal() && TT.isOSBinFormatELF()) + return AsmPrinter.getSymbolPreferLocal(*MO.getGlobal()); + + const DataLayout &DL = MF.getDataLayout(); + + MCSymbol *Sym = nullptr; + SmallString<128> Name; + StringRef Suffix; + + if (!Suffix.empty()) + Name += DL.getPrivateGlobalPrefix(); + + if (MO.isGlobal()) { + const GlobalValue *GV = MO.getGlobal(); + AsmPrinter.getNameWithPrefix(Name, GV); + } else if (MO.isSymbol()) { + Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL); + } else if (MO.isMBB()) { + assert(Suffix.empty()); + Sym = MO.getMBB()->getSymbol(); + } + + Name += Suffix; + if (!Sym) + Sym = Ctx.getOrCreateSymbol(Name); + + return Sym; +} + +MCOperand M68kMCInstLower::LowerSymbolOperand(const MachineOperand &MO, + MCSymbol *Sym) const { + // FIXME We would like an efficient form for this, so we don't have to do a + // lot of extra uniquing. 
This fixme is originally from X86 + const MCExpr *Expr = nullptr; + MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None; + + switch (MO.getTargetFlags()) { + default: + llvm_unreachable("Unknown target flag on GV operand"); + case M68kII::MO_NO_FLAG: + case M68kII::MO_ABSOLUTE_ADDRESS: + case M68kII::MO_PC_RELATIVE_ADDRESS: + break; + case M68kII::MO_GOTPCREL: + RefKind = MCSymbolRefExpr::VK_GOTPCREL; + break; + case M68kII::MO_GOT: + RefKind = MCSymbolRefExpr::VK_GOT; + break; + case M68kII::MO_GOTOFF: + RefKind = MCSymbolRefExpr::VK_GOTOFF; + break; + case M68kII::MO_PLT: + RefKind = MCSymbolRefExpr::VK_PLT; + break; + } + + if (!Expr) { + Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx); + } + + if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) { + Expr = MCBinaryExpr::createAdd( + Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx); + } + + return MCOperand::createExpr(Expr); +} + +Optional +M68kMCInstLower::LowerOperand(const MachineInstr *MI, + const MachineOperand &MO) const { + switch (MO.getType()) { + default: + LLVM_DEBUG(MI->dump()); + llvm_unreachable("unknown operand type"); + case MachineOperand::MO_Register: + // Ignore all implicit register operands. 
+ if (MO.isImplicit()) + return None; + return MCOperand::createReg(MO.getReg()); + case MachineOperand::MO_Immediate: + return MCOperand::createImm(MO.getImm()); + case MachineOperand::MO_MachineBasicBlock: + case MachineOperand::MO_GlobalAddress: + case MachineOperand::MO_ExternalSymbol: + return LowerSymbolOperand(MO, GetSymbolFromOperand(MO)); + case MachineOperand::MO_MCSymbol: + return LowerSymbolOperand(MO, MO.getMCSymbol()); + case MachineOperand::MO_JumpTableIndex: + return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex())); + case MachineOperand::MO_ConstantPoolIndex: + return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex())); + case MachineOperand::MO_BlockAddress: + return LowerSymbolOperand( + MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); + case MachineOperand::MO_RegisterMask: + // Ignore call clobbers. + return None; + } +} + +void M68kMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { + OutMI.setOpcode(MI->getOpcode()); + + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + Optional MCOp = LowerOperand(MI, MO); + + if (MCOp.hasValue() && MCOp.getValue().isValid()) + OutMI.addOperand(MCOp.getValue()); + } + + switch (OutMI.getOpcode()) { + + // TAILJMPj, TAILJMPq - Lower to the correct jump instructions. 
+ case M68k::TAILJMPj: + case M68k::TAILJMPq: { + unsigned Opcode; + switch (OutMI.getOpcode()) { + default: + llvm_unreachable("Invalid opcode"); + case M68k::TAILJMPj: + Opcode = M68k::JMP32j; + break; + case M68k::TAILJMPq: + Opcode = M68k::BRA8; + break; + } + + assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands"); + OutMI.setOpcode(Opcode); + break; + } + } +} Index: llvm/lib/Target/M68k/M68kMachineFunction.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kMachineFunction.h @@ -0,0 +1,115 @@ +//===-- M68kMachineFunctionInfo.h - M68k private data ---------*- C++ -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file declares the M68k specific subclass of MachineFunctionInfo. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KMACHINEFUNCTION_H +#define LLVM_LIB_TARGET_M68K_M68KMACHINEFUNCTION_H + +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/Support/MachineValueType.h" + +namespace llvm { + +class M68kMachineFunctionInfo : public MachineFunctionInfo { + MachineFunction &MF; + + /// Non-zero if the function has base pointer and makes call to + /// llvm.eh.sjlj.setjmp. When non-zero, the value is a displacement from the + /// frame pointer to a slot where the base pointer is stashed. + signed char RestoreBasePointerOffset = 0; + + /// Size of the callee-saved register portion of the stack frame in bytes. + unsigned CalleeSavedFrameSize = 0; + + /// Number of bytes function pops on return (in addition to the space used by + /// the return address). 
Used on windows platform for stdcall & fastcall + /// name decoration + unsigned BytesToPopOnReturn = 0; + + /// FrameIndex for return slot. + int ReturnAddrIndex = 0; + + /// The number of bytes by which return address stack slot is moved as the + /// result of tail call optimization. + int TailCallReturnAddrDelta = 0; + + /// keeps track of the virtual register initialized for use as the global + /// base register. This is used for PIC in some PIC relocation models. + unsigned GlobalBaseReg = 0; + + /// FrameIndex for start of varargs area. + int VarArgsFrameIndex = 0; + + /// Keeps track of whether this function uses sequences of pushes to pass + /// function parameters. + bool HasPushSequences = false; + + /// Some subtargets require that sret lowering includes + /// returning the value of the returned struct in a register. This field + /// holds the virtual register into which the sret argument is passed. + unsigned SRetReturnReg = 0; + + /// A list of virtual and physical registers that must be forwarded to every + /// musttail call. + SmallVector ForwardedMustTailRegParms; + + /// The number of bytes on stack consumed by the arguments being passed on + /// the stack. 
+ unsigned ArgumentStackSize = 0; + +public: + M68kMachineFunctionInfo() = default; + explicit M68kMachineFunctionInfo(MachineFunction &MF) : MF(MF) {} + + bool getRestoreBasePointer() const { return RestoreBasePointerOffset != 0; } + void setRestoreBasePointer(const MachineFunction *MF); + int getRestoreBasePointerOffset() const { return RestoreBasePointerOffset; } + + unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; } + void setCalleeSavedFrameSize(unsigned bytes) { CalleeSavedFrameSize = bytes; } + + unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; } + void setBytesToPopOnReturn(unsigned bytes) { BytesToPopOnReturn = bytes; } + + int getRAIndex() const { return ReturnAddrIndex; } + void setRAIndex(int Index) { ReturnAddrIndex = Index; } + + int getTCReturnAddrDelta() const { return TailCallReturnAddrDelta; } + void setTCReturnAddrDelta(int delta) { TailCallReturnAddrDelta = delta; } + + unsigned getGlobalBaseReg() const { return GlobalBaseReg; } + void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; } + + int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; } + + bool getHasPushSequences() const { return HasPushSequences; } + void setHasPushSequences(bool HasPush) { HasPushSequences = HasPush; } + + unsigned getSRetReturnReg() const { return SRetReturnReg; } + void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } + + unsigned getArgumentStackSize() const { return ArgumentStackSize; } + void setArgumentStackSize(unsigned size) { ArgumentStackSize = size; } + + SmallVectorImpl &getForwardedMustTailRegParms() { + return ForwardedMustTailRegParms; + } + +private: + virtual void anchor(); +}; + +} // end of namespace llvm + +#endif // M68K_MACHINE_FUNCTION_INFO_H Index: llvm/lib/Target/M68k/M68kMachineFunction.cpp =================================================================== --- /dev/null +++ 
llvm/lib/Target/M68k/M68kMachineFunction.cpp @@ -0,0 +1,20 @@ +//===-- M68kMachineFunctionInfo.cpp - M68k private data ----*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "M68kMachineFunction.h" + +#include "M68kInstrInfo.h" +#include "M68kSubtarget.h" + +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Function.h" + +using namespace llvm; + +void M68kMachineFunctionInfo::anchor() {} Index: llvm/lib/Target/M68k/M68kRegisterInfo.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kRegisterInfo.h @@ -0,0 +1,109 @@ +//===-- M68kRegisterInfo.h - M68k Register Information Impl --*- C++ --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the M68k implementation of the TargetRegisterInfo +/// class. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KREGISTERINFO_H +#define LLVM_LIB_TARGET_M68K_M68KREGISTERINFO_H + +#include "M68k.h" + +#include "llvm/CodeGen/TargetRegisterInfo.h" + +#define GET_REGINFO_HEADER +#include "M68kGenRegisterInfo.inc" + +namespace llvm { +class M68kSubtarget; +class TargetInstrInfo; +class Type; + +class M68kRegisterInfo : public M68kGenRegisterInfo { + virtual void anchor(); + + /// Physical register used as stack ptr. + unsigned StackPtr; + + /// Physical register used as frame ptr. 
+ unsigned FramePtr; + + /// Physical register used as a base ptr in complex stack frames. I.e., when + /// we need a 3rd base, not just SP and FP, due to variable size stack + /// objects. + unsigned BasePtr; + + /// Physical register used to store GOT address if needed. + unsigned GlobalBasePtr; + +protected: + const M68kSubtarget &Subtarget; + +public: + M68kRegisterInfo(const M68kSubtarget &Subtarget); + + const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override; + + const uint32_t *getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID) const override; + + /// Returns a register class with registers that can be used in forming tail + /// calls. + const TargetRegisterClass * + getRegsForTailCall(const MachineFunction &MF) const; + + /// Return a mega-register of the specified register Reg so its sub-register + /// of index SubIdx is Reg, its super(or mega) Reg. In other words it will + /// return a register that is not direct super register but still shares + /// physical register with Reg. + /// NOTE not sure about the term though. + unsigned getMatchingMegaReg(unsigned Reg, + const TargetRegisterClass *RC) const; + + /// Returns the Register Class of a physical register of the given type, + /// picking the biggest register class of the right type that contains this + /// physreg. + const TargetRegisterClass *getMaximalPhysRegClass(unsigned reg, MVT VT) const; + + /// Return index of a register within a register class, otherwise return -1 + int getRegisterOrder(unsigned Reg, const TargetRegisterClass &TRC) const; + + /// Return spill order index of a register, if there is none then trap + int getSpillRegisterOrder(unsigned Reg) const; + + BitVector getReservedRegs(const MachineFunction &MF) const override; + + bool requiresRegisterScavenging(const MachineFunction &MF) const override; + + bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override; + + /// FrameIndex represent objects inside a abstract stack. 
We must replace + /// FrameIndex with an stack/frame pointer direct reference. + void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, + unsigned FIOperandNum, + RegScavenger *RS = nullptr) const override; + + bool hasBasePointer(const MachineFunction &MF) const; + + /// True if the stack can be realigned for the target. + bool canRealignStack(const MachineFunction &MF) const override; + + Register getFrameRegister(const MachineFunction &MF) const override; + unsigned getStackRegister() const { return StackPtr; } + unsigned getBaseRegister() const { return BasePtr; } + unsigned getGlobalBaseRegister() const { return GlobalBasePtr; } + + const TargetRegisterClass *intRegClass(unsigned Size) const; +}; + +} // end namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kRegisterInfo.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kRegisterInfo.cpp @@ -0,0 +1,263 @@ +//===-- M68kRegisterInfo.cpp - CPU0 Register Information -----*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the CPU0 implementation of the TargetRegisterInfo class. 
+/// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "m68k-reg-info" + +#include "M68kRegisterInfo.h" + +#include "M68k.h" +#include "M68kMachineFunction.h" +#include "M68kSubtarget.h" + +#include "MCTargetDesc/M68kMCTargetDesc.h" + +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +#define GET_REGINFO_TARGET_DESC +#include "M68kGenRegisterInfo.inc" + +using namespace llvm; + +static cl::opt EnableBasePointer( + "m68k-use-base-pointer", cl::Hidden, cl::init(true), + cl::desc("Enable use of a base pointer for complex stack frames")); + +// Pin the vtable to this file. +void M68kRegisterInfo::anchor() {} + +M68kRegisterInfo::M68kRegisterInfo(const M68kSubtarget &ST) + // FIXME x26 not sure if this is the correct value, it expects RA, but M68k + // passes IP anyway, how does this work? 
+ : M68kGenRegisterInfo(M68k::A0, 0, 0, M68k::PC), Subtarget(ST) { + StackPtr = M68k::SP; + FramePtr = M68k::A6; + GlobalBasePtr = M68k::A5; + BasePtr = M68k::A4; +} + +//===----------------------------------------------------------------------===// +// Callee Saved Registers methods +//===----------------------------------------------------------------------===// + +const MCPhysReg * +M68kRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { + return CSR_STD_SaveList; +} + +const uint32_t * +M68kRegisterInfo::getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID) const { + return CSR_STD_RegMask; +} + +const TargetRegisterClass * +M68kRegisterInfo::getRegsForTailCall(const MachineFunction &MF) const { + return &M68k::XR32_TCRegClass; +} + +unsigned +M68kRegisterInfo::getMatchingMegaReg(unsigned Reg, + const TargetRegisterClass *RC) const { + for (MCSuperRegIterator Super(Reg, this); Super.isValid(); ++Super) + if (RC->contains(*Super)) + return *Super; + return 0; +} + +const TargetRegisterClass * +M68kRegisterInfo::getMaximalPhysRegClass(unsigned reg, MVT VT) const { + assert(Register::isPhysicalRegister(reg) && + "reg must be a physical register"); + + // Pick the most sub register class of the right type that contains + // this physreg. 
+ const TargetRegisterClass *BestRC = nullptr; + for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; + ++I) { + const TargetRegisterClass *RC = *I; + if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) && + RC->contains(reg) && + (!BestRC || + (BestRC->hasSubClass(RC) && RC->getNumRegs() > BestRC->getNumRegs()))) + BestRC = RC; + } + + assert(BestRC && "Couldn't find the register class"); + return BestRC; +} + +int M68kRegisterInfo::getRegisterOrder(unsigned Reg, + const TargetRegisterClass &TRC) const { + for (unsigned i = 0; i < TRC.getNumRegs(); ++i) { + if (regsOverlap(Reg, TRC.getRegister(i))) { + return i; + } + } + return -1; +} + +int M68kRegisterInfo::getSpillRegisterOrder(unsigned Reg) const { + int Result = getRegisterOrder(Reg, *getRegClass(M68k::SPILLRegClassID)); + assert(Result >= 0 && "Can not determine spill order"); + return Result; +} + +BitVector M68kRegisterInfo::getReservedRegs(const MachineFunction &MF) const { + const M68kFrameLowering *TFI = getFrameLowering(MF); + + BitVector Reserved(getNumRegs()); + + // Set a register's and its sub-registers and aliases as reserved. + auto setBitVector = [&Reserved, this](unsigned Reg) { + for (MCRegAliasIterator I(Reg, this, /* self */ true); I.isValid(); ++I) { + Reserved.set(*I); + } + for (MCSubRegIterator I(Reg, this, /* self */ true); I.isValid(); ++I) { + Reserved.set(*I); + } + }; + + setBitVector(M68k::PC); + setBitVector(M68k::SP); + + if (TFI->hasFP(MF)) { + setBitVector(FramePtr); + } + + // Set the base-pointer register and its aliases as reserved if needed. 
+ if (hasBasePointer(MF)) { + CallingConv::ID CC = MF.getFunction().getCallingConv(); + const uint32_t *RegMask = getCallPreservedMask(MF, CC); + if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister())) + report_fatal_error("Stack realignment in presence of dynamic allocas is " + "not supported with" + "this calling convention."); + + setBitVector(getBaseRegister()); + } + + return Reserved; +} + +void M68kRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, unsigned FIOperandNum, + RegScavenger *RS) const { + MachineInstr &MI = *II; + MachineFunction &MF = *MI.getParent()->getParent(); + const M68kFrameLowering *TFI = getFrameLowering(MF); + + // We have either (i,An,Rn) or (i,An) EA form + // NOTE Base contains the FI and we need to backtrace a bit to get Disp + MachineOperand &Disp = MI.getOperand(FIOperandNum - 1); + MachineOperand &Base = MI.getOperand(FIOperandNum); + + int Imm = (int)(Disp.getImm()); + int FIndex = (int)(Base.getIndex()); + + // unsigned Opc = MI.getOpcode(); + // FIXME #7 there is no jmp from mem yet + // bool AfterFPPop = Opc == M68k::TAILJMPm || Opc == M68k::TCRETURNmi; + bool AfterFPPop = false; + + unsigned BasePtr; + if (hasBasePointer(MF)) + BasePtr = (FIndex < 0 ? FramePtr : getBaseRegister()); + else if (needsStackRealignment(MF)) + BasePtr = (FIndex < 0 ? FramePtr : StackPtr); + else if (AfterFPPop) + BasePtr = StackPtr; + else + BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr); + + Base.ChangeToRegister(BasePtr, false); + + // Now add the frame object offset to the offset from FP. + int64_t FIOffset; + Register IgnoredFrameReg; + if (AfterFPPop) { + // Tail call jmp happens after FP is popped. 
+ const MachineFrameInfo &MFI = MF.getFrameInfo(); + FIOffset = MFI.getObjectOffset(FIndex) - TFI->getOffsetOfLocalArea(); + } else { + FIOffset = + TFI->getFrameIndexReference(MF, FIndex, IgnoredFrameReg).getFixed(); + } + + if (BasePtr == StackPtr) + FIOffset += SPAdj; + + Disp.ChangeToImmediate(FIOffset + Imm); +} + +bool M68kRegisterInfo::requiresRegisterScavenging( + const MachineFunction &MF) const { + return true; +} + +bool M68kRegisterInfo::trackLivenessAfterRegAlloc( + const MachineFunction &MF) const { + return true; +} + +static bool CantUseSP(const MachineFrameInfo &MFI) { + return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment(); +} + +bool M68kRegisterInfo::hasBasePointer(const MachineFunction &MF) const { + const MachineFrameInfo &MFI = MF.getFrameInfo(); + + if (!EnableBasePointer) + return false; + + // When we need stack realignment, we can't address the stack from the frame + // pointer. When we have dynamic allocas or stack-adjusting inline asm, we + // can't address variables from the stack pointer. MS inline asm can + // reference locals while also adjusting the stack pointer. When we can't + // use both the SP and the FP, we need a separate base pointer register. + bool CantUseFP = needsStackRealignment(MF); + return CantUseFP && CantUseSP(MFI); +} + +bool M68kRegisterInfo::canRealignStack(const MachineFunction &MF) const { + if (!TargetRegisterInfo::canRealignStack(MF)) + return false; + + const MachineFrameInfo &MFI = MF.getFrameInfo(); + const MachineRegisterInfo *MRI = &MF.getRegInfo(); + + // Stack realignment requires a frame pointer. If we already started + // register allocation with frame pointer elimination, it is too late now. + if (!MRI->canReserveReg(FramePtr)) + return false; + + // If a base pointer is necessary. Check that it isn't too late to reserve it. 
+  if (CantUseSP(MFI))
+    return MRI->canReserveReg(BasePtr);
+
+  return true;
+}
+
+Register M68kRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+  return TFI->hasFP(MF) ? FramePtr : StackPtr;
+}
+
+const TargetRegisterClass *M68kRegisterInfo::intRegClass(unsigned size) const {
+  return &M68k::DR32RegClass;
+}
Index: llvm/lib/Target/M68k/M68kSubtarget.h
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kSubtarget.h
@@ -0,0 +1,157 @@
+//===-- M68kSubtarget.h - Define Subtarget for the M68k -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the M68k specific subclass of TargetSubtargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KSUBTARGET_H
+#define LLVM_LIB_TARGET_M68K_M68KSUBTARGET_H
+
+#include "M68kFrameLowering.h"
+#include "M68kISelLowering.h"
+#include "M68kInstrInfo.h"
+
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Support/Alignment.h"
+
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "M68kGenSubtargetInfo.inc"
+
+extern bool M68kReserveGP;
+extern bool M68kNoCpload;
+
+namespace llvm {
+class StringRef;
+
+class M68kTargetMachine;
+
+class M68kSubtarget : public M68kGenSubtargetInfo {
+  virtual void anchor();
+
+protected:
+  // These define which ISA is supported. Since each Motorola M68k ISA is
+  // built on top of the previous one, whenever an ISA is selected the previous
+  // ones are selected as well.
+  enum SubtargetEnum { M00, M10, M20, M30, M40, M60 };
+  SubtargetEnum SubtargetKind = M00;
+
+  InstrItineraryData InstrItins;
+
+  /// Small section is used.
+  bool UseSmallSection = true;
+
+  const M68kTargetMachine &TM;
+
+  SelectionDAGTargetInfo TSInfo;
+  M68kInstrInfo InstrInfo;
+  M68kFrameLowering FrameLowering;
+  M68kTargetLowering TLInfo;
+
+  /// The minimum alignment known to hold of the stack frame on
+  /// entry to the function and which must be maintained by every function.
+  unsigned stackAlignment = 8;
+
+  Triple TargetTriple;
+
+public:
+  /// This constructor initializes the data members to match that
+  /// of the specified triple.
+  M68kSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+                const M68kTargetMachine &_TM);
+
+  /// Parses features string setting specified subtarget options. Definition
+  /// of function is auto generated by tblgen.
+  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
+
+  bool atLeastM68000() const { return SubtargetKind >= M00; }
+  bool atLeastM68010() const { return SubtargetKind >= M10; }
+  bool atLeastM68020() const { return SubtargetKind >= M20; }
+  bool atLeastM68030() const { return SubtargetKind >= M30; }
+  bool atLeastM68040() const { return SubtargetKind >= M40; }
+  bool atLeastM68060() const { return SubtargetKind >= M60; }
+
+  bool useSmallSection() const { return UseSmallSection; }
+
+  bool abiUsesSoftFloat() const;
+
+  const Triple &getTargetTriple() const { return TargetTriple; }
+
+  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+
+  /// Return true if the subtarget allows calls to immediate address.
+  bool isLegalToCallImmediateAddr() const;
+
+  bool isPositionIndependent() const;
+
+  /// Classify a global variable reference for the current subtarget according
+  /// to how we should reference it in a non-pcrel context.
+  unsigned char classifyLocalReference(const GlobalValue *GV) const;
+
+  /// Classify a global variable reference for the current subtarget according
+  /// to how we should reference it in a non-pcrel context.
+  unsigned char classifyGlobalReference(const GlobalValue *GV,
+                                        const Module &M) const;
+  unsigned char classifyGlobalReference(const GlobalValue *GV) const;
+
+  /// Classify an external variable reference for the current subtarget
+  /// according to how we should reference it in a non-pcrel context.
+  unsigned char classifyExternalReference(const Module &M) const;
+
+  /// Classify a global function reference for the current subtarget.
+  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
+                                                const Module &M) const;
+  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
+
+  /// Classify a blockaddress reference for the current subtarget according to
+  /// how we should reference it in a non-pcrel context.
+  unsigned char classifyBlockAddressReference() const;
+
+  unsigned getJumpTableEncoding() const;
+
+  /// TODO this must be controlled by options like -malign-int and -mshort
+  Align getStackAlignment() const { return Align(stackAlignment); }
+
+  /// getSlotSize - Stack slot size in bytes.
+ unsigned getSlotSize() const { return 4; } + + M68kSubtarget &initializeSubtargetDependencies(StringRef CPU, Triple TT, + StringRef FS, + const M68kTargetMachine &TM); + + const SelectionDAGTargetInfo *getSelectionDAGInfo() const override { + return &TSInfo; + } + + const M68kInstrInfo *getInstrInfo() const override { return &InstrInfo; } + + const M68kFrameLowering *getFrameLowering() const override { + return &FrameLowering; + } + + const M68kRegisterInfo *getRegisterInfo() const override { + return &InstrInfo.getRegisterInfo(); + } + + const M68kTargetLowering *getTargetLowering() const override { + return &TLInfo; + } + + const InstrItineraryData *getInstrItineraryData() const override { + return &InstrItins; + } +}; +} // namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kSubtarget.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kSubtarget.cpp @@ -0,0 +1,238 @@ +//===-- M68kSubtarget.cpp - M68k Subtarget Information ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file implements the M68k specific subclass of TargetSubtargetInfo. 
+/// +//===----------------------------------------------------------------------===// + +#include "M68kSubtarget.h" + +#include "M68k.h" +#include "M68kMachineFunction.h" +#include "M68kRegisterInfo.h" +#include "M68kTargetMachine.h" + +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "m68k-subtarget" + +#define GET_SUBTARGETINFO_TARGET_DESC +#define GET_SUBTARGETINFO_CTOR +#include "M68kGenSubtargetInfo.inc" + +extern bool FixGlobalBaseReg; + +/// Select the M68k CPU for the given triple and cpu name. +static StringRef selectM68kCPU(Triple TT, StringRef CPU) { + if (CPU.empty() || CPU == "generic") { + CPU = "M68000"; + } + return CPU; +} + +void M68kSubtarget::anchor() {} + +M68kSubtarget::M68kSubtarget(const Triple &TT, StringRef CPU, StringRef FS, + const M68kTargetMachine &TM) + : M68kGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), TM(TM), TSInfo(), + InstrInfo(initializeSubtargetDependencies(CPU, TT, FS, TM)), + FrameLowering(*this, this->getStackAlignment()), TLInfo(TM, *this), + TargetTriple(TT) {} + +bool M68kSubtarget::isPositionIndependent() const { + return TM.isPositionIndependent(); +} + +bool M68kSubtarget::isLegalToCallImmediateAddr() const { return true; } + +bool M68kSubtarget::abiUsesSoftFloat() const { return true; } + +M68kSubtarget &M68kSubtarget::initializeSubtargetDependencies( + StringRef CPU, Triple TT, StringRef FS, const M68kTargetMachine &TM) { + std::string CPUName = selectM68kCPU(TT, CPU).str(); + + // Parse features string. + ParseSubtargetFeatures(CPUName, CPUName, FS); + + // Initialize scheduling itinerary for the specified CPU. + InstrItins = getInstrItineraryForCPU(CPUName); + + // Default stack alignment is 8 bytes, ??? Do I need this override? 
+  // if (StackAlignOverride)
+  //   stackAlignment = StackAlignOverride;
+  // else
+  stackAlignment = 8;
+
+  return *this;
+}
+
+//===----------------------------------------------------------------------===//
+// Code Model
+//
+// Key assumptions:
+//  - Whenever possible we use pc-rel encoding since it is smaller(16 bit) than
+//    absolute(32 bit).
+//  - GOT is reachable within 16 bit offset for both Small and Medium models.
+//  - Code section is reachable within 16 bit offset for both models.
+//
+//  ---------------------+-------------------------+--------------------------
+//                       |          Small          |          Medium
+//                       +-------------------------+------------+-------------
+//                       |   Static   |    PIC     |   Static   |    PIC
+//  ---------------------+------------+------------+------------+-------------
+//  branch               |   pc-rel   |   pc-rel   |   pc-rel   |   pc-rel
+//  ---------------------+------------+------------+------------+-------------
+//  call global          |    @PLT    |    @PLT    |    @PLT    |    @PLT
+//  ---------------------+------------+------------+------------+-------------
+//  call internal        |   pc-rel   |   pc-rel   |   pc-rel   |   pc-rel
+//  ---------------------+------------+------------+------------+-------------
+//  data local           |   pc-rel   |   pc-rel   |   ~pc-rel  |   ^pc-rel
+//  ---------------------+------------+------------+------------+-------------
+//  data local big*      |   pc-rel   |   pc-rel   |  absolute  |  @GOTOFF
+//  ---------------------+------------+------------+------------+-------------
+//  data global          |   pc-rel   | @GOTPCREL  |   ~pc-rel  | @GOTPCREL
+//  ---------------------+------------+------------+------------+-------------
+//  data global big*     |   pc-rel   | @GOTPCREL  |  absolute  | @GOTPCREL
+//  ---------------------+------------+------------+------------+-------------
+//
+// * Big data potentially cannot be reached within 16 bit offset and requires
+//   special handling for old(x00 and x10) CPUs. Normally these symbols go into
+//   a separate .ldata section which is mapped after normal .data and .text, but
+//   I don't really know how this must be done for M68k atm... will try to dig
+//   this info out from GCC. For now CPUs prior to M68020 will use static ref
+//   for Static Model and @GOT based references for PIC.
+//
+// ~ These are absolute for older CPUs for now.
+// ^ These are @GOTOFF for older CPUs for now.
+//===----------------------------------------------------------------------===//
+
+/// Classify a blockaddress reference for the current subtarget according to how
+/// we should reference it in a non-pcrel context.
+unsigned char M68kSubtarget::classifyBlockAddressReference() const {
+  // Unless we start to support Large Code Model branching is always pc-rel
+  return M68kII::MO_PC_RELATIVE_ADDRESS;
+}
+
+unsigned char
+M68kSubtarget::classifyLocalReference(const GlobalValue *GV) const {
+  switch (TM.getCodeModel()) {
+  default:
+    llvm_unreachable("Unsupported code model");
+  case CodeModel::Small:
+  case CodeModel::Kernel: {
+    return M68kII::MO_PC_RELATIVE_ADDRESS;
+  }
+  case CodeModel::Medium: {
+    if (isPositionIndependent()) {
+      // On M68020 and better we can fit any big data offset into disp field.
+      if (atLeastM68020()) {
+        return M68kII::MO_PC_RELATIVE_ADDRESS;
+      }
+      // Otherwise we could check the data size and make sure it will fit into
+      // 16 bit offset. For now we will be conservative and go with @GOTOFF
+      return M68kII::MO_GOTOFF;
+    } else {
+      if (atLeastM68020()) {
+        return M68kII::MO_PC_RELATIVE_ADDRESS;
+      }
+      return M68kII::MO_ABSOLUTE_ADDRESS;
+    }
+  }
+  }
+}
+
+unsigned char M68kSubtarget::classifyExternalReference(const Module &M) const {
+  if (TM.shouldAssumeDSOLocal(M, nullptr))
+    return classifyLocalReference(nullptr);
+
+  if (isPositionIndependent())
+    return M68kII::MO_GOTPCREL;
+
+  return M68kII::MO_GOT;
+}
+
+unsigned char
+M68kSubtarget::classifyGlobalReference(const GlobalValue *GV) const {
+  return classifyGlobalReference(GV, *GV->getParent());
+}
+
+unsigned char M68kSubtarget::classifyGlobalReference(const GlobalValue *GV,
+                                                     const Module &M) const {
+  if (TM.shouldAssumeDSOLocal(M, GV))
+    return classifyLocalReference(GV);
+
+  switch (TM.getCodeModel()) {
+  default:
+    llvm_unreachable("Unsupported code model");
+  case CodeModel::Small:
+  case CodeModel::Kernel: {
+    if (isPositionIndependent()) {
+      return M68kII::MO_GOTPCREL;
+    } else {
+      return M68kII::MO_PC_RELATIVE_ADDRESS;
+    }
+  }
+  case CodeModel::Medium: {
+    if (isPositionIndependent())
+      return M68kII::MO_GOTPCREL;
+
+    if (atLeastM68020())
+      return M68kII::MO_PC_RELATIVE_ADDRESS;
+
+    return M68kII::MO_ABSOLUTE_ADDRESS;
+  }
+  }
+}
+
+unsigned M68kSubtarget::getJumpTableEncoding() const {
+  if (isPositionIndependent()) {
+    // The only time we want to use GOTOFF(used when with EK_Custom32) is when
+    // the potential delta between the jump target and table base can be larger
+    // than displacement field, which is True for older CPUs(16 bit disp)
+    // in Medium model(can have large data way beyond 16 bit).
+    if (TM.getCodeModel() == CodeModel::Medium && !atLeastM68020())
+      return MachineJumpTableInfo::EK_Custom32;
+
+    return MachineJumpTableInfo::EK_LabelDifference32;
+  }
+
+  // In non-pic modes, just use the address of a block.
+  return MachineJumpTableInfo::EK_BlockAddress;
+}
+
+unsigned char
+M68kSubtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
+  return classifyGlobalFunctionReference(GV, *GV->getParent());
+}
+
+unsigned char
+M68kSubtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
+                                               const Module &M) const {
+  // local always use pc-rel referencing
+  if (TM.shouldAssumeDSOLocal(M, GV))
+    return M68kII::MO_NO_FLAG;
+
+  // If the function is marked as non-lazy, generate an indirect call
+  // which loads from the GOT directly. This avoids run-time overhead
+  // at the cost of eager binding.
+  auto *F = dyn_cast_or_null<Function>(GV);
+  if (F && F->hasFnAttribute(Attribute::NonLazyBind)) {
+    return M68kII::MO_GOTPCREL;
+  }
+
+  // otherwise linker will figure this out
+  return M68kII::MO_PLT;
+}
Index: llvm/lib/Target/M68k/M68kTargetMachine.h
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kTargetMachine.h
@@ -0,0 +1,56 @@
+//===-- M68kTargetMachine.h - Define TargetMachine for M68k ----- C++ -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the M68k specific subclass of TargetMachine.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KTARGETMACHINE_H
+#define LLVM_LIB_TARGET_M68K_M68KTARGETMACHINE_H
+
+#include "M68kSubtarget.h"
+#include "MCTargetDesc/M68kMCTargetDesc.h"
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class formatted_raw_ostream;
+class M68kRegisterInfo;
+
+class M68kTargetMachine : public LLVMTargetMachine {
+  std::unique_ptr<TargetLoweringObjectFile> TLOF;
+  M68kSubtarget Subtarget;
+
+  mutable StringMap<std::unique_ptr<M68kSubtarget>> SubtargetMap;
+
+public:
+  M68kTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+                    StringRef FS, const TargetOptions &Options,
+                    Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+                    CodeGenOpt::Level OL, bool JIT);
+
+  ~M68kTargetMachine() override;
+
+  const M68kSubtarget *getSubtargetImpl() const { return &Subtarget; }
+
+  const M68kSubtarget *getSubtargetImpl(const Function &F) const override;
+
+  // Pass Pipeline Configuration
+  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+  TargetLoweringObjectFile *getObjFileLowering() const override {
+    return TLOF.get();
+  }
+};
+} // namespace llvm
+
+#endif
Index: llvm/lib/Target/M68k/M68kTargetMachine.cpp
===================================================================
--- llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -11,7 +11,153 @@
 ///
 //===----------------------------------------------------------------------===//
 
-/// This is just a placeholder to make current
-/// commit buildable. Body of this function will
-/// be filled in later commits
-extern "C" void LLVMInitializeM68kTarget() {}
+#include "M68kTargetMachine.h"
+#include "M68k.h"
+
+#include "M68kSubtarget.h"
+#include "M68kTargetObjectFile.h"
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/TargetRegistry.h"
+#include <memory>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "m68k"
+
+extern "C" void LLVMInitializeM68kTarget() {
+  RegisterTargetMachine<M68kTargetMachine> X(TheM68kTarget);
+}
+
+namespace {
+
+// FIXME This layout is true for M68000 original cpu, other variants will
+// affect DL computation
+std::string computeDataLayout(const Triple &TT, StringRef CPU,
+                              const TargetOptions &Options) {
+  std::string Ret = "";
+  // M68k is Big Endian
+  Ret += "E";
+
+  // FIXME #28 how to wire it with the used object format?
+  Ret += "-m:e";
+
+  // M68k pointers are always 32 bit wide even for 16 bit cpus
+  Ret += "-p:32:32";
+
+  // M68k requires i8 to align on 2 byte boundry
+  Ret += "-i8:8:8-i16:16:16-i32:32:32";
+
+  // FIXME #29 no floats at the moment
+
+  // The registers can hold 8, 16, 32 bits
+  Ret += "-n8:16:32";
+
+  // Aggregates are 32 bit aligned and stack is 16 bit aligned
+  Ret += "-a:0:32-S16";
+
+  return Ret;
+}
+
+Reloc::Model getEffectiveRelocModel(const Triple &TT,
+                                    Optional<Reloc::Model> RM) {
+  // If not defined we default to static
+  if (!RM.hasValue()) {
+    return Reloc::Static;
+  }
+
+  return *RM;
+}
+
+CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
+                                       bool JIT) {
+  if (!CM) {
+    return CodeModel::Small;
+  } else if (CM == CodeModel::Large) {
+    llvm_unreachable("Large code model is not supported");
+  } else if (CM == CodeModel::Kernel) {
+    // FIXME #31 Kernel afaik is small cm plus some weird binding
+    llvm_unreachable("Kernel code model is not supported");
+  }
+  return CM.getValue();
+}
+} // end anonymous namespace
+
+M68kTargetMachine::M68kTargetMachine(const Target &T, const Triple &TT,
+                                     StringRef CPU, StringRef FS,
+                                     const TargetOptions &Options,
+                                     Optional<Reloc::Model> RM,
+                                     Optional<CodeModel::Model> CM,
+                                     CodeGenOpt::Level OL, bool JIT)
+    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options), TT, CPU, FS,
+                        Options, getEffectiveRelocModel(TT, RM),
+                        ::getEffectiveCodeModel(CM, JIT), OL),
+      TLOF(std::make_unique<M68kELFTargetObjectFile>()),
+      Subtarget(TT, CPU, FS, *this) {
+  initAsmInfo();
+}
+
+M68kTargetMachine::~M68kTargetMachine() {}
+
+const M68kSubtarget *
+M68kTargetMachine::getSubtargetImpl(const Function &F) const {
+  Attribute CPUAttr = F.getFnAttribute("target-cpu");
+  Attribute FSAttr = F.getFnAttribute("target-features");
+
+  auto CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
+  auto FS = FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;
+
+  auto &I = SubtargetMap[CPU + FS];
+  if (!I) {
+    // This needs to be done before we create a new subtarget since any
+    // creation will depend on the TM and the code generation flags on the
+    // function that reside in TargetOptions.
+    resetTargetOptions(F);
+    I = std::make_unique<M68kSubtarget>(TargetTriple, CPU, FS, *this);
+  }
+  return I.get();
+}
+
+//===----------------------------------------------------------------------===//
+// Pass Pipeline Configuration
+//===----------------------------------------------------------------------===//
+
+namespace {
+class M68kPassConfig : public TargetPassConfig {
+public:
+  M68kPassConfig(M68kTargetMachine &TM, PassManagerBase &PM)
+      : TargetPassConfig(TM, PM) {}
+
+  M68kTargetMachine &getM68kTargetMachine() const {
+    return getTM<M68kTargetMachine>();
+  }
+
+  const M68kSubtarget &getM68kSubtarget() const {
+    return *getM68kTargetMachine().getSubtargetImpl();
+  }
+
+  bool addInstSelector() override;
+  void addPreSched2() override;
+  void addPreEmitPass() override;
+};
+} // namespace
+
+TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
+  return new M68kPassConfig(*this, PM);
+}
+
+bool M68kPassConfig::addInstSelector() {
+  // Install an instruction selector.
+ addPass(createM68kISelDag(getM68kTargetMachine())); + addPass(createM68kGlobalBaseRegPass()); + return false; +} + +void M68kPassConfig::addPreSched2() { addPass(createM68kExpandPseudoPass()); } + +void M68kPassConfig::addPreEmitPass() { + addPass(createM68kCollapseMOVEMPass()); + // addPass(createM68kConvertMOVToMOVMPass()); +} Index: llvm/lib/Target/M68k/M68kTargetObjectFile.h =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kTargetObjectFile.h @@ -0,0 +1,33 @@ +//===-- M68kELFTargetObjectFile.h - M68k Object Info ---------*- C++ -====// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains declarations for M68k ELF object file lowering. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_M68K_M68KTARGETOBJECTFILE_H +#define LLVM_LIB_TARGET_M68K_M68KTARGETOBJECTFILE_H + +#include "M68kTargetMachine.h" + +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" + +namespace llvm { +class M68kTargetMachine; +class M68kELFTargetObjectFile : public TargetLoweringObjectFileELF { + const M68kTargetMachine *TM; + MCSection *SmallDataSection; + MCSection *SmallBSSSection; + +public: + void Initialize(MCContext &Ctx, const TargetMachine &TM) override; +}; +} // end namespace llvm + +#endif Index: llvm/lib/Target/M68k/M68kTargetObjectFile.cpp =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kTargetObjectFile.cpp @@ -0,0 +1,48 @@ +//===-- M68kELFTargetObjectFile.cpp - M68k Object Files -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains definitions for M68k ELF object file lowering.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kTargetObjectFile.h"
+
+#include "M68kSubtarget.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned> SSThreshold(
+    "m68k-ssection-threshold", cl::Hidden,
+    cl::desc("Small data and bss section threshold size (default=8)"),
+    cl::init(8));
+
+void M68kELFTargetObjectFile::Initialize(MCContext &Ctx,
+                                         const TargetMachine &TM) {
+  TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+  InitializeELF(TM.Options.UseInitArray);
+
+  this->TM = &static_cast<const M68kTargetMachine &>(TM);
+
+  // FIXME do we need `.sdata` and `.sbss` explicitly?
+  SmallDataSection = getContext().getELFSection(
+      ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+
+  SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+                                               ELF::SHF_WRITE | ELF::SHF_ALLOC);
+}