Index: lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.h
+++ lib/Target/RISCV/RISCVISelLowering.h
@@ -109,6 +109,12 @@
                          Type *Ty) const override {
     return true;
   }
+
+  template <class NodeTy>
+  SDValue getAddrSmall(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
+  template <class NodeTy>
+  SDValue getAddrMedium(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
+
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
Index: lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.cpp
+++ lib/Target/RISCV/RISCVISelLowering.cpp
@@ -318,26 +318,83 @@
   }
 }
 
+// Helpers that create the flavour of target node matching the SDNode kind;
+// used by the getAddr* templates below so one implementation covers globals,
+// block addresses and constant pools.
+SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
+                      SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
+}
+
+SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
+                      SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
+                                   Flags);
+}
+
+SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
+                      SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+                                   N->getOffset(), Flags);
+}
+
+// Generate a sequence for accessing addresses within the first 2 GiB of
+// address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
+template <class NodeTy>
+SDValue RISCVTargetLowering::getAddrSmall(NodeTy *N, SelectionDAG &DAG,
+                                          unsigned Flags) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+  SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_HI);
+  SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_LO);
+  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
+  SDValue MNLo =
+      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
+  return MNLo;
+}
+
+// Generate a sequence for accessing addresses within any 2GiB range within the
+// address space. This generates the pattern
+// (addi (auipc %pcrel_hi(sym)) %lo(sym)).
+template <class NodeTy>
+SDValue RISCVTargetLowering::getAddrMedium(NodeTy *N, SelectionDAG &DAG,
+                                           unsigned Flags) const {
+  SDLoc DL(N);
+  EVT Ty = getPointerTy(DAG.getDataLayout());
+  SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_PCREL_HI);
+  SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, Flags | RISCVII::MO_LO);
+  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::AUIPC, DL, Ty, AddrHi), 0);
+  SDValue MNLo =
+      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
+  return MNLo;
+}
+
 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
   SDLoc DL(Op);
   EVT Ty = Op.getValueType();
   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
-  const GlobalValue *GV = N->getGlobal();
   int64_t Offset = N->getOffset();
   MVT XLenVT = Subtarget.getXLenVT();
 
   if (isPositionIndependent())
     report_fatal_error("Unable to lowerGlobalAddress");
+
+  SDValue MNLo;
+  switch (getTargetMachine().getCodeModel()) {
+  default:
+    // Use the 'medium' addressing strategy by default for now.
+    MNLo = getAddrMedium(N, DAG);
+    break;
+  case CodeModel::Tiny:
+  case CodeModel::Small:
+    MNLo = getAddrSmall(N, DAG);
+    break;
+  case CodeModel::Medium:
+    MNLo = getAddrMedium(N, DAG);
+    break;
+  }
+
   // In order to maximise the opportunity for common subexpression elimination,
   // emit a separate ADD node for the global address offset instead of folding
   // it in the global address node. Later peephole optimisations may choose to
   // fold it back in when profitable.
-  SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI);
-  SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO);
-  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
-  SDValue MNLo =
-      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
   if (Offset != 0)
     return DAG.getNode(ISD::ADD, DL, Ty, MNLo,
                        DAG.getConstant(Offset, DL, XLenVT));
@@ -346,43 +403,39 @@
 
 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT Ty = Op.getValueType();
   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
-  const BlockAddress *BA = N->getBlockAddress();
-  int64_t Offset = N->getOffset();
 
   if (isPositionIndependent())
     report_fatal_error("Unable to lowerBlockAddress");
 
-  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
-  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
-  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
-  SDValue MNLo =
-      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
-  return MNLo;
+  switch (getTargetMachine().getCodeModel()) {
+  default:
+    // Use the 'medium' addressing strategy by default for now.
+    return getAddrMedium(N, DAG);
+  case CodeModel::Tiny:
+  case CodeModel::Small:
+    return getAddrSmall(N, DAG);
+  case CodeModel::Medium:
+    return getAddrMedium(N, DAG);
+  }
 }
 
 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT Ty = Op.getValueType();
   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
-  const Constant *CPA = N->getConstVal();
-  int64_t Offset = N->getOffset();
-  unsigned Alignment = N->getAlignment();
-
-  if (!isPositionIndependent()) {
-    SDValue CPAHi =
-        DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI);
-    SDValue CPALo =
-        DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO);
-    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0);
-    SDValue MNLo =
-        SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0);
-    return MNLo;
-  } else {
+
+  if (isPositionIndependent())
     report_fatal_error("Unable to lowerConstantPool");
+
+  switch (getTargetMachine().getCodeModel()) {
+  default:
+    // Use the 'medium' addressing strategy by default for now.
+    return getAddrMedium(N, DAG);
+  case CodeModel::Tiny:
+  case CodeModel::Small:
+    return getAddrSmall(N, DAG);
+  case CodeModel::Medium:
+    return getAddrMedium(N, DAG);
   }
 }
Index: lib/Target/RISCV/RISCVMCInstLower.cpp
===================================================================
--- lib/Target/RISCV/RISCVMCInstLower.cpp
+++ lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -37,6 +37,9 @@
   case RISCVII::MO_None:
     Kind = RISCVMCExpr::VK_RISCV_None;
     break;
+  case RISCVII::MO_PCREL_HI:
+    Kind = RISCVMCExpr::VK_RISCV_PCREL_HI;
+    break;
   case RISCVII::MO_LO:
     Kind = RISCVMCExpr::VK_RISCV_LO;
     break;
Index: test/CodeGen/RISCV/codemodel-lowering.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/codemodel-lowering.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -code-model=small -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-SMALL
+; RUN: llc -mtriple=riscv32 -mattr=+f -code-model=medium -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-MEDIUM
+
+; Check lowering of globals
+
+@G = global i32 0
+
+define i32 @lower_global(i32 %a) nounwind {
+; RV32I-SMALL-LABEL: lower_global:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    lui a0, %hi(G)
+; RV32I-SMALL-NEXT:    lw a0, %lo(G)(a0)
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_global:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(G)
+; RV32I-MEDIUM-NEXT:    lw a0, %lo(G)(a0)
+; RV32I-MEDIUM-NEXT:    ret
+  %1 = load volatile i32, i32* @G
+  ret i32 %1
+}
+
+; Check lowering of blockaddresses
+
+@addr = global i8* null
+
+define void @lower_blockaddress() nounwind {
+; RV32I-SMALL-LABEL: lower_blockaddress:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    lui a0, %hi(addr)
+; RV32I-SMALL-NEXT:    addi a1, zero, 1
+; RV32I-SMALL-NEXT:    sw a1, %lo(addr)(a0)
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_blockaddress:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(addr)
+; RV32I-MEDIUM-NEXT:    addi a1, zero, 1
+; RV32I-MEDIUM-NEXT:    sw a1, %lo(addr)(a0)
+; RV32I-MEDIUM-NEXT:    ret
+  store volatile i8* blockaddress(@lower_blockaddress, %block), i8** @addr
+  ret void
+
+block:
+  unreachable
+}
+
+; Check lowering of constantpools
+
+define float @lower_constantpool(float %a) nounwind {
+; RV32I-SMALL-LABEL: lower_constantpool:
+; RV32I-SMALL:       # %bb.0:
+; RV32I-SMALL-NEXT:    fmv.w.x ft0, a0
+; RV32I-SMALL-NEXT:    lui a0, %hi(.LCPI2_0)
+; RV32I-SMALL-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; RV32I-SMALL-NEXT:    flw ft1, 0(a0)
+; RV32I-SMALL-NEXT:    fadd.s ft0, ft0, ft1
+; RV32I-SMALL-NEXT:    fmv.x.w a0, ft0
+; RV32I-SMALL-NEXT:    ret
+;
+; RV32I-MEDIUM-LABEL: lower_constantpool:
+; RV32I-MEDIUM:       # %bb.0:
+; RV32I-MEDIUM-NEXT:    fmv.w.x ft0, a0
+; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(.LCPI2_0)
+; RV32I-MEDIUM-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; RV32I-MEDIUM-NEXT:    flw ft1, 0(a0)
+; RV32I-MEDIUM-NEXT:    fadd.s ft0, ft0, ft1
+; RV32I-MEDIUM-NEXT:    fmv.x.w a0, ft0
+; RV32I-MEDIUM-NEXT:    ret
+  %1 = fadd float %a, 1.0
+  ret float %1
+}