Index: include/llvm/CodeGen/MachineBasicBlock.h
===================================================================
--- include/llvm/CodeGen/MachineBasicBlock.h
+++ include/llvm/CodeGen/MachineBasicBlock.h
@@ -115,6 +115,10 @@
   /// branch.
   bool AddressTaken = false;
 
+  /// Indicate that this basic block needs its symbol to be emitted regardless
+  /// of whether the flow just falls through to it.
+  bool LabelMustBeEmitted = false;
+
   /// Indicate that this basic block is the entry block of an EH scope, i.e.,
   /// the block that used to have a catchpad or cleanuppad instruction in the
   /// LLVM IR.
@@ -159,6 +163,13 @@
   /// branch.
   void setHasAddressTaken() { AddressTaken = true; }
 
+  /// Test whether this block must have its label emitted.
+  bool hasLabelMustBeEmitted() const { return LabelMustBeEmitted; }
+
+  /// Set this block to reflect that, regardless of how we flow to it, we need
+  /// its label to be emitted.
+  void setLabelMustBeEmitted() { LabelMustBeEmitted = true; }
+
   /// Return the MachineFunction containing this basic block.
   const MachineFunction *getParent() const { return xParent; }
   MachineFunction *getParent() { return xParent; }
Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2892,13 +2892,16 @@
 
   // Print the main label for the block.
   if (MBB.pred_empty() ||
-      (isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry())) {
+      (isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry() &&
+       !MBB.hasLabelMustBeEmitted())) {
     if (isVerbose()) {
       // NOTE: Want this comment at start of line, don't emit with AddComment.
       OutStreamer->emitRawComment(" %bb." + Twine(MBB.getNumber()) + ":",
                                   false);
     }
   } else {
+    if (isVerbose() && MBB.hasLabelMustBeEmitted())
+      OutStreamer->AddComment("Label of block must be emitted");
     OutStreamer->EmitLabel(MBB.getSymbol());
   }
 }
Index: lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
===================================================================
--- lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -52,6 +52,8 @@
                             MachineBasicBlock::iterator MBBI,
                             AtomicRMWInst::BinOp, bool IsMasked, int Width,
                             MachineBasicBlock::iterator &NextMBBI);
+  bool expandAddrPCRel(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                       MachineBasicBlock::iterator &NextMBBI);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -77,10 +79,51 @@
   return Modified;
 }
 
+bool RISCVExpandPseudo::expandAddrPCRel(MachineBasicBlock &MBB,
+                                        MachineBasicBlock::iterator MBBI,
+                                        MachineBasicBlock::iterator &NextMBBI) {
+  MachineFunction *MF = MBB.getParent();
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+
+  unsigned DestReg = MI.getOperand(0).getReg();
+  const MachineOperand &Symbol = MI.getOperand(1);
+
+  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+  // Tell AsmPrinter that we unconditionally want the symbol of this block to
+  // be emitted.
+  NewMBB->setLabelMustBeEmitted();
+
+  MF->insert(++MBB.getIterator(), NewMBB);
+
+  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg).add(Symbol);
+  BuildMI(NewMBB, DL, TII->get(RISCV::ADDI), DestReg)
+      .addReg(DestReg)
+      .addMBB(NewMBB, RISCVII::MO_PCREL_LO);
+
+  // Move all the rest of the instructions to NewMBB.
+  NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
+  // Update machine-CFG edges.
+  NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
+  // Make the original basic block fall through to the new one.
+  MBB.addSuccessor(NewMBB);
+
+  // Make sure live-ins are correctly attached to this new basic block.
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *NewMBB);
+
+  NextMBBI = MBB.end();
+  MI.eraseFromParent();
+  return true;
+}
+
 bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  MachineBasicBlock::iterator &NextMBBI) {
   switch (MBBI->getOpcode()) {
+  case RISCV::PseudoAddrPCRel:
+    return expandAddrPCRel(MBB, MBBI, NextMBBI);
   case RISCV::PseudoAtomicLoadNand32:
     return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                              NextMBBI);
Index: lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.h
+++ lib/Target/RISCV/RISCVISelLowering.h
@@ -32,7 +32,8 @@
   SELECT_CC,
   BuildPairF64,
   SplitF64,
-  TAIL
+  TAIL,
+  WRAPPER_PCREL
 };
 }
 
@@ -109,6 +110,10 @@
                                          Type *Ty) const override {
     return true;
   }
+
+  template <class NodeTy>
+  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
+
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
Index: lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.cpp
+++ lib/Target/RISCV/RISCVISelLowering.cpp
@@ -318,26 +318,67 @@
   }
 }
 
+SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
+                      SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
+}
+
+SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
+                      SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
+                                   Flags);
+}
+
+SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
+                      SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+                                   N->getOffset(), Flags);
+}
+
+template <class NodeTy>
+SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
+                                     unsigned Flags) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+
+  switch (getTargetMachine().getCodeModel()) {
+  default:
+    report_fatal_error("Unsupported code model for lowering");
+  case CodeModel::Small: {
+    // Generate a sequence for accessing addresses within the first 2 GiB of
+    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
+    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_HI);
+    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_LO);
+    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
+    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
+  }
+  case CodeModel::Medium: {
+    // Generate a sequence for accessing addresses within any 2 GiB range of
+    // the address space. This generates the pattern
+    // (WrapperPCRel %pcrel_hi(sym)).
+    SDValue Addr = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_PCREL_HI);
+    return DAG.getNode(RISCVISD::WRAPPER_PCREL, DL, Ty, Addr);
+  }
+  }
+}
+
 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
   SDLoc DL(Op);
   EVT Ty = Op.getValueType();
   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
-  const GlobalValue *GV = N->getGlobal();
   int64_t Offset = N->getOffset();
   MVT XLenVT = Subtarget.getXLenVT();
 
   if (isPositionIndependent())
     report_fatal_error("Unable to lowerGlobalAddress");
+
+  SDValue MNLo = getAddr(N, DAG);
+
   // In order to maximise the opportunity for common subexpression elimination,
   // emit a separate ADD node for the global address offset instead of folding
   // it in the global address node. Later peephole optimisations may choose to
   // fold it back in when profitable.
-  SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI);
-  SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO);
-  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
-  SDValue MNLo =
-      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
   if (Offset != 0)
     return DAG.getNode(ISD::ADD, DL, Ty, MNLo,
                        DAG.getConstant(Offset, DL, XLenVT));
@@ -346,44 +387,22 @@
 
 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT Ty = Op.getValueType();
   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
-  const BlockAddress *BA = N->getBlockAddress();
-  int64_t Offset = N->getOffset();
 
   if (isPositionIndependent())
     report_fatal_error("Unable to lowerBlockAddress");
 
-  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
-  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
-  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
-  SDValue MNLo =
-      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
-  return MNLo;
+  return getAddr(N, DAG);
 }
 
 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT Ty = Op.getValueType();
   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
-  const Constant *CPA = N->getConstVal();
-  int64_t Offset = N->getOffset();
-  unsigned Alignment = N->getAlignment();
-
-  if (!isPositionIndependent()) {
-    SDValue CPAHi =
-        DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI);
-    SDValue CPALo =
-        DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO);
-    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0);
-    SDValue MNLo =
-        SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0);
-    return MNLo;
-  } else {
+
+  if (isPositionIndependent())
     report_fatal_error("Unable to lowerConstantPool");
-  }
+
+  return getAddr(N, DAG);
 }
 
 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -1600,6 +1619,8 @@
     return "RISCVISD::SplitF64";
   case RISCVISD::TAIL:
     return "RISCVISD::TAIL";
+  case RISCVISD::WRAPPER_PCREL:
+    return "RISCVISD::WRAPPER_PCREL";
   }
   return nullptr;
 }
Index: lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- lib/Target/RISCV/RISCVInstrInfo.td
+++ lib/Target/RISCV/RISCVInstrInfo.td
@@ -47,6 +47,7 @@
 def Tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
                   [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                    SDNPVariadic]>;
+def WrapperPCRel : SDNode<"RISCVISD::WRAPPER_PCREL", SDTIntUnaryOp>;
 
 //===----------------------------------------------------------------------===//
 // Operand and SDNode transformation definitions.
@@ -796,6 +797,17 @@
 def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                        "lla", "$dst, $src">;
 
+/// PC-relative addressing
+
+def PseudoAddrPCRel : Pseudo<(outs GPR:$dst), (ins ixlenimm:$addr), []>;
+
+def : Pat<(WrapperPCRel tglobaladdr:$addr),
+          (PseudoAddrPCRel tglobaladdr:$addr)>;
+def : Pat<(WrapperPCRel tblockaddress:$addr),
+          (PseudoAddrPCRel tblockaddress:$addr)>;
+def : Pat<(WrapperPCRel tconstpool:$addr),
+          (PseudoAddrPCRel tconstpool:$addr)>;
+
 /// Loads
 
 multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
Index: lib/Target/RISCV/RISCVMCInstLower.cpp
===================================================================
--- lib/Target/RISCV/RISCVMCInstLower.cpp
+++ lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -43,6 +43,12 @@
   case RISCVII::MO_HI:
     Kind = RISCVMCExpr::VK_RISCV_HI;
     break;
+  case RISCVII::MO_PCREL_LO:
+    Kind = RISCVMCExpr::VK_RISCV_PCREL_LO;
+    break;
+  case RISCVII::MO_PCREL_HI:
+    Kind = RISCVMCExpr::VK_RISCV_PCREL_HI;
+    break;
   }
 
   const MCExpr *ME =
Index: lib/Target/RISCV/Utils/RISCVBaseInfo.h
===================================================================
--- lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -50,6 +50,7 @@
   MO_None,
   MO_LO,
   MO_HI,
+  MO_PCREL_LO,
   MO_PCREL_HI,
 };
 } // namespace RISCVII
Index: test/CodeGen/RISCV/codemodel-lowering.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/codemodel-lowering.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -code-model=small -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-SMALL
+; RUN: llc -mtriple=riscv32 -mattr=+f -code-model=medium -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-MEDIUM
+
+; Check lowering of globals
+@G = global i32 0
+
+define i32 @lower_global(i32 %a) nounwind {
+; RV32I-SMALL-LABEL: lower_global:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    lui a0, %hi(G)
+; RV32I-SMALL-NEXT:    lw a0, %lo(G)(a0)
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_global:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:  .LBB0_1: # Label of block must be emitted
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(G)
+; RV32I-MEDIUM-NEXT:    addi a0, a0, %pcrel_lo(.LBB0_1)
+; RV32I-MEDIUM-NEXT:    lw a0, 0(a0)
+; RV32I-MEDIUM-NEXT:    ret
+  %1 = load volatile i32, i32* @G
+  ret i32 %1
+}
+
+; Check lowering of blockaddresses
+
+@addr = global i8* null
+
+define void @lower_blockaddress() nounwind {
+; RV32I-SMALL-LABEL: lower_blockaddress:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    lui a0, %hi(addr)
+; RV32I-SMALL-NEXT:    addi a1, zero, 1
+; RV32I-SMALL-NEXT:    sw a1, %lo(addr)(a0)
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_blockaddress:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:  .LBB1_1: # Label of block must be emitted
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(addr)
+; RV32I-MEDIUM-NEXT:    addi a0, a0, %pcrel_lo(.LBB1_1)
+; RV32I-MEDIUM-NEXT:    addi a1, zero, 1
+; RV32I-MEDIUM-NEXT:    sw a1, 0(a0)
+; RV32I-MEDIUM-NEXT:    ret
  store volatile i8* blockaddress(@lower_blockaddress, %block), i8** @addr
  ret void

block:
  unreachable
}

; Check lowering of constantpools

define float @lower_constantpool(float %a) nounwind {
; RV32I-SMALL-LABEL: lower_constantpool:
; RV32I-SMALL:       # %bb.0:
; RV32I-SMALL-NEXT:    fmv.w.x ft0, a0
; RV32I-SMALL-NEXT:    lui a0, %hi(.LCPI2_0)
; RV32I-SMALL-NEXT:    addi a0, a0, %lo(.LCPI2_0)
; RV32I-SMALL-NEXT:    flw ft1, 0(a0)
; RV32I-SMALL-NEXT:    fadd.s ft0, ft0, ft1
; RV32I-SMALL-NEXT:    fmv.x.w a0, ft0
; RV32I-SMALL-NEXT:    ret
;
; RV32I-MEDIUM-LABEL: lower_constantpool:
; RV32I-MEDIUM:       # %bb.0:
; RV32I-MEDIUM-NEXT:  .LBB2_1: # Label of block must be emitted
; RV32I-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.LCPI2_0)
; RV32I-MEDIUM-NEXT:    addi a1, a1, %pcrel_lo(.LBB2_1)
; RV32I-MEDIUM-NEXT:    flw ft0, 0(a1)
; RV32I-MEDIUM-NEXT:    fmv.w.x ft1, a0
; RV32I-MEDIUM-NEXT:    fadd.s ft0, ft1, ft0
; RV32I-MEDIUM-NEXT:    fmv.x.w a0, ft0
; RV32I-MEDIUM-NEXT:    ret
  %1 = fadd float %a, 1.0
  ret float %1
}