Index: include/llvm/CodeGen/MachineBasicBlock.h
===================================================================
--- include/llvm/CodeGen/MachineBasicBlock.h
+++ include/llvm/CodeGen/MachineBasicBlock.h
@@ -114,6 +114,10 @@
   /// branch.
   bool AddressTaken = false;
 
+  /// Indicate that this basic block needs its symbol to be emitted regardless
+  /// of whether control flow just falls through to it.
+  bool LabelMustBeEmitted = false;
+
   /// Indicate that this basic block is the entry block of an EH scope, i.e.,
   /// the block that used to have a catchpad or cleanuppad instruction in the
   /// LLVM IR.
@@ -158,6 +162,13 @@
   /// branch.
   void setHasAddressTaken() { AddressTaken = true; }
 
+  /// Test whether this block must have its label emitted.
+  bool hasLabelMustBeEmitted() const { return LabelMustBeEmitted; }
+
+  /// Set this block to reflect that, regardless of how we flow to it, we need
+  /// its label to be emitted.
+  void setLabelMustBeEmitted() { LabelMustBeEmitted = true; }
+
   /// Return the MachineFunction containing this basic block.
   const MachineFunction *getParent() const { return xParent; }
   MachineFunction *getParent() { return xParent; }
Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2880,13 +2880,16 @@
   }
 
   // Print the main label for the block.
   if (MBB.pred_empty() ||
-      (isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry())) {
+      (isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry() &&
+       !MBB.hasLabelMustBeEmitted())) {
     if (isVerbose()) {
       // NOTE: Want this comment at start of line, don't emit with AddComment.
      OutStreamer->emitRawComment(" %bb." + Twine(MBB.getNumber()) + ":", false);
     }
   } else {
+    if (isVerbose() && MBB.hasLabelMustBeEmitted())
+      OutStreamer->AddComment("Label of block must be emitted");
     OutStreamer->EmitLabel(MBB.getSymbol());
   }
 }
Index: lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
===================================================================
--- lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -54,6 +54,9 @@
   bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, bool IsMasked,
                            int Width, MachineBasicBlock::iterator &NextMBBI);
+  bool expandLoadLocalAddress(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MBBI,
+                              MachineBasicBlock::iterator &NextMBBI);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -117,6 +120,8 @@
     return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
   case RISCV::PseudoMaskedCmpXchg32:
     return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
+  case RISCV::PseudoLLA:
+    return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
   }
 
   return false;
@@ -597,6 +602,46 @@
   return true;
 }
 
+bool RISCVExpandPseudo::expandLoadLocalAddress(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    MachineBasicBlock::iterator &NextMBBI) {
+  MachineFunction *MF = MBB.getParent();
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+
+  unsigned DestReg = MI.getOperand(0).getReg();
+  const MachineOperand &Symbol = MI.getOperand(1);
+
+  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+  // Tell AsmPrinter that we unconditionally want the symbol of this label to be
+  // emitted.
+  NewMBB->setLabelMustBeEmitted();
+
+  MF->insert(++MBB.getIterator(), NewMBB);
+
+  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+      .addDisp(Symbol, 0, RISCVII::MO_PCREL_HI);
+  BuildMI(NewMBB, DL, TII->get(RISCV::ADDI), DestReg)
+      .addReg(DestReg)
+      .addMBB(NewMBB, RISCVII::MO_PCREL_LO);
+
+  // Move all the rest of the instructions to NewMBB.
+  NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
+  // Update machine-CFG edges.
+  NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
+  // Make the original basic block fall through to the new one.
+  MBB.addSuccessor(NewMBB);
+
+  // Make sure live-ins are correctly attached to this new basic block.
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *NewMBB);
+
+  NextMBBI = MBB.end();
+  MI.eraseFromParent();
+  return true;
+}
+
 } // end of anonymous namespace
 
 INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
Index: lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.h
+++ lib/Target/RISCV/RISCVISelLowering.h
@@ -133,6 +133,10 @@
                                Type *Ty) const override {
     return true;
   }
+
+  template <class NodeTy>
+  SDValue getAddr(NodeTy *N, SelectionDAG &DAG) const;
+
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
Index: lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.cpp
+++ lib/Target/RISCV/RISCVISelLowering.cpp
@@ -355,72 +355,90 @@
   }
 }
 
+static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
+}
+
+static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
+                                   Flags);
+}
+
+static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+                                   N->getOffset(), Flags);
+}
+
+template <class NodeTy>
+SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+
+  switch (getTargetMachine().getCodeModel()) {
+  default:
+    report_fatal_error("Unsupported code model for lowering");
+  case CodeModel::Small: {
+    // Generate a sequence for accessing addresses within the first 2 GiB of
+    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
+    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
+    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
+    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
+    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
+  }
+  case CodeModel::Medium: {
+    // Generate a sequence for accessing addresses within any 2 GiB range of
+    // the address space. This generates the pattern (PseudoLLA sym), which
+    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
+    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
+    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
+  }
+  }
+}
+
 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
   SDLoc DL(Op);
   EVT Ty = Op.getValueType();
   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
-  const GlobalValue *GV = N->getGlobal();
   int64_t Offset = N->getOffset();
   MVT XLenVT = Subtarget.getXLenVT();
 
   if (isPositionIndependent())
     report_fatal_error("Unable to lowerGlobalAddress");
+
+  SDValue Addr = getAddr(N, DAG);
+
   // In order to maximise the opportunity for common subexpression elimination,
   // emit a separate ADD node for the global address offset instead of folding
   // it in the global address node. Later peephole optimisations may choose to
   // fold it back in when profitable.
-  SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI);
-  SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO);
-  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
-  SDValue MNLo =
-      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
   if (Offset != 0)
-    return DAG.getNode(ISD::ADD, DL, Ty, MNLo,
+    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                        DAG.getConstant(Offset, DL, XLenVT));
-  return MNLo;
+  return Addr;
 }
 
 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT Ty = Op.getValueType();
   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
-  const BlockAddress *BA = N->getBlockAddress();
-  int64_t Offset = N->getOffset();
 
   if (isPositionIndependent())
     report_fatal_error("Unable to lowerBlockAddress");
 
-  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
-  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
-  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
-  SDValue MNLo =
-      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
-  return MNLo;
+  return getAddr(N, DAG);
 }
 
 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT Ty = Op.getValueType();
   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
-  const Constant *CPA = N->getConstVal();
-  int64_t Offset = N->getOffset();
-  unsigned Alignment = N->getAlignment();
-
-  if (!isPositionIndependent()) {
-    SDValue CPAHi =
-        DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI);
-    SDValue CPALo =
-        DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO);
-    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0);
-    SDValue MNLo =
-        SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0);
-    return MNLo;
-  } else {
+
+  if (isPositionIndependent())
     report_fatal_error("Unable to lowerConstantPool");
-  }
+
+  return getAddr(N, DAG);
 }
 
 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Index: lib/Target/RISCV/RISCVInstrInfo.cpp
===================================================================
--- lib/Target/RISCV/RISCVInstrInfo.cpp
+++ lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -438,6 +438,7 @@
     return 0;
   case RISCV::PseudoCALL:
   case RISCV::PseudoTAIL:
+  case RISCV::PseudoLLA:
     return 8;
   case TargetOpcode::INLINEASM:
   case TargetOpcode::INLINEASM_BR: {
Index: lib/Target/RISCV/RISCVMCInstLower.cpp
===================================================================
--- lib/Target/RISCV/RISCVMCInstLower.cpp
+++ lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -42,6 +42,12 @@
   case RISCVII::MO_HI:
     Kind = RISCVMCExpr::VK_RISCV_HI;
     break;
+  case RISCVII::MO_PCREL_LO:
+    Kind = RISCVMCExpr::VK_RISCV_PCREL_LO;
+    break;
+  case RISCVII::MO_PCREL_HI:
+    Kind = RISCVMCExpr::VK_RISCV_PCREL_HI;
+    break;
   }
 
   const MCExpr *ME =
Index: lib/Target/RISCV/Utils/RISCVBaseInfo.h
===================================================================
--- lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -50,6 +50,7 @@
   MO_None,
   MO_LO,
   MO_HI,
+  MO_PCREL_LO,
   MO_PCREL_HI,
 };
 } // namespace RISCVII
Index: test/CodeGen/RISCV/codemodel-lowering.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/codemodel-lowering.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -code-model=small -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-SMALL
+; RUN: llc -mtriple=riscv32 -mattr=+f -code-model=medium -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-MEDIUM
+
+; Check lowering of globals
+@G = global i32 0
+
+define i32 @lower_global(i32 %a) nounwind {
+; RV32I-SMALL-LABEL: lower_global:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    lui a0, %hi(G)
+; RV32I-SMALL-NEXT:    lw a0, %lo(G)(a0)
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_global:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:  .LBB0_1: # Label of block must be emitted
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(G)
+; RV32I-MEDIUM-NEXT:    addi a0, a0, %pcrel_lo(.LBB0_1)
+; RV32I-MEDIUM-NEXT:    lw a0, 0(a0)
+; RV32I-MEDIUM-NEXT:    ret
+  %1 = load volatile i32, i32* @G
+  ret i32 %1
+}
+
+; Check lowering of blockaddresses
+
+@addr = global i8* null
+
+define void @lower_blockaddress() nounwind {
+; RV32I-SMALL-LABEL: lower_blockaddress:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    lui a0, %hi(addr)
+; RV32I-SMALL-NEXT:    addi a1, zero, 1
+; RV32I-SMALL-NEXT:    sw a1, %lo(addr)(a0)
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_blockaddress:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:  .LBB1_1: # Label of block must be emitted
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(addr)
+; RV32I-MEDIUM-NEXT:    addi a0, a0, %pcrel_lo(.LBB1_1)
+; RV32I-MEDIUM-NEXT:    addi a1, zero, 1
+; RV32I-MEDIUM-NEXT:    sw a1, 0(a0)
+; RV32I-MEDIUM-NEXT:    ret
+  store volatile i8* blockaddress(@lower_blockaddress, %block), i8** @addr
+  ret void
+
+block:
+  unreachable
+}
+
+; Check lowering of constantpools
+
+define float @lower_constantpool(float %a) nounwind {
+; RV32I-SMALL-LABEL: lower_constantpool:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    fmv.w.x ft0, a0
+; RV32I-SMALL-NEXT:    lui a0, %hi(.LCPI2_0)
+; RV32I-SMALL-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; RV32I-SMALL-NEXT:    flw ft1, 0(a0)
+; RV32I-SMALL-NEXT:    fadd.s ft0, ft0, ft1
+; RV32I-SMALL-NEXT:    fmv.x.w a0, ft0
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_constantpool:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:  .LBB2_1: # Label of block must be emitted
+; RV32I-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.LCPI2_0)
+; RV32I-MEDIUM-NEXT:    addi a1, a1, %pcrel_lo(.LBB2_1)
+; RV32I-MEDIUM-NEXT:    flw ft0, 0(a1)
+; RV32I-MEDIUM-NEXT:    fmv.w.x ft1, a0
+; RV32I-MEDIUM-NEXT:    fadd.s ft0, ft1, ft0
+; RV32I-MEDIUM-NEXT:    fmv.x.w a0, ft0
+; RV32I-MEDIUM-NEXT:    ret
+  %1 = fadd float %a, 1.0
+  ret float %1
+}