Index: llvm/trunk/bindings/go/llvm/target.go =================================================================== --- llvm/trunk/bindings/go/llvm/target.go +++ llvm/trunk/bindings/go/llvm/target.go @@ -62,6 +62,7 @@ const ( CodeModelDefault CodeModel = C.LLVMCodeModelDefault CodeModelJITDefault CodeModel = C.LLVMCodeModelJITDefault + CodeModelTiny CodeModel = C.LLVMCodeModelTiny CodeModelSmall CodeModel = C.LLVMCodeModelSmall CodeModelKernel CodeModel = C.LLVMCodeModelKernel CodeModelMedium CodeModel = C.LLVMCodeModelMedium Index: llvm/trunk/docs/CommandGuide/lli.rst =================================================================== --- llvm/trunk/docs/CommandGuide/lli.rst +++ llvm/trunk/docs/CommandGuide/lli.rst @@ -125,6 +125,7 @@ .. code-block:: text default: Target default code model + tiny: Tiny code model small: Small code model kernel: Kernel code model medium: Medium code model Index: llvm/trunk/include/llvm-c/TargetMachine.h =================================================================== --- llvm/trunk/include/llvm-c/TargetMachine.h +++ llvm/trunk/include/llvm-c/TargetMachine.h @@ -45,6 +45,7 @@ typedef enum { LLVMCodeModelDefault, LLVMCodeModelJITDefault, + LLVMCodeModelTiny, LLVMCodeModelSmall, LLVMCodeModelKernel, LLVMCodeModelMedium, Index: llvm/trunk/include/llvm/CodeGen/CommandFlags.inc =================================================================== --- llvm/trunk/include/llvm/CodeGen/CommandFlags.inc +++ llvm/trunk/include/llvm/CodeGen/CommandFlags.inc @@ -74,7 +74,8 @@ static cl::opt CMModel( "code-model", cl::desc("Choose code model"), - cl::values(clEnumValN(CodeModel::Small, "small", "Small code model"), + cl::values(clEnumValN(CodeModel::Tiny, "tiny", "Tiny code model"), + clEnumValN(CodeModel::Small, "small", "Small code model"), clEnumValN(CodeModel::Kernel, "kernel", "Kernel code model"), clEnumValN(CodeModel::Medium, "medium", "Medium code model"), clEnumValN(CodeModel::Large, "large", "Large code model"))); Index: llvm/trunk/include/llvm/Support/CodeGen.h =================================================================== --- llvm/trunk/include/llvm/Support/CodeGen.h +++ llvm/trunk/include/llvm/Support/CodeGen.h @@ -25,7 +25,7 @@ // Code model types. namespace CodeModel { // Sync changes with CodeGenCWrappers.h. - enum Model { Small, Kernel, Medium, Large }; + enum Model { Tiny, Small, Kernel, Medium, Large }; } namespace PICLevel { Index: llvm/trunk/include/llvm/Target/CodeGenCWrappers.h =================================================================== --- llvm/trunk/include/llvm/Target/CodeGenCWrappers.h +++ llvm/trunk/include/llvm/Target/CodeGenCWrappers.h @@ -31,6 +31,8 @@ LLVM_FALLTHROUGH; case LLVMCodeModelDefault: return None; + case LLVMCodeModelTiny: + return CodeModel::Tiny; case LLVMCodeModelSmall: return CodeModel::Small; case LLVMCodeModelKernel: @@ -45,6 +47,8 @@ inline LLVMCodeModel wrap(CodeModel::Model Model) { switch (Model) { + case CodeModel::Tiny: + return LLVMCodeModelTiny; case CodeModel::Small: return LLVMCodeModelSmall; case CodeModel::Kernel: Index: llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -835,36 +835,55 @@ } case AArch64::LOADgot: { - // Expand into ADRP + LDR. 
+ MachineFunction *MF = MBB.getParent(); unsigned DstReg = MI.getOperand(0).getReg(); const MachineOperand &MO1 = MI.getOperand(1); unsigned Flags = MO1.getTargetFlags(); - MachineInstrBuilder MIB1 = - BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg); - MachineInstrBuilder MIB2 = - BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui)) - .add(MI.getOperand(0)) - .addReg(DstReg); - if (MO1.isGlobal()) { - MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE); - MIB2.addGlobalAddress(MO1.getGlobal(), 0, - Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); - } else if (MO1.isSymbol()) { - MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE); - MIB2.addExternalSymbol(MO1.getSymbolName(), - Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); + if (MF->getTarget().getCodeModel() == CodeModel::Tiny) { + // Tiny codemodel expand to LDR + MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), + TII->get(AArch64::LDRXl), DstReg); + + if (MO1.isGlobal()) { + MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags); + } else if (MO1.isSymbol()) { + MIB.addExternalSymbol(MO1.getSymbolName(), Flags); + } else { + assert(MO1.isCPI() && + "Only expect globals, externalsymbols, or constant pools"); + MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags); + } } else { - assert(MO1.isCPI() && - "Only expect globals, externalsymbols, or constant pools"); - MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), - Flags | AArch64II::MO_PAGE); - MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), - Flags | AArch64II::MO_PAGEOFF | - AArch64II::MO_NC); - } + // Small codemodel expand into ADRP + LDR. + MachineInstrBuilder MIB1 = + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg); + MachineInstrBuilder MIB2 = + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui)) + .add(MI.getOperand(0)) + .addReg(DstReg); - transferImpOps(MI, MIB1, MIB2); + if (MO1.isGlobal()) { + MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE); + MIB2.addGlobalAddress(MO1.getGlobal(), 0, + Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); + } else if (MO1.isSymbol()) { + MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE); + MIB2.addExternalSymbol(MO1.getSymbolName(), Flags | + AArch64II::MO_PAGEOFF | + AArch64II::MO_NC); + } else { + assert(MO1.isCPI() && + "Only expect globals, externalsymbols, or constant pools"); + MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), + Flags | AArch64II::MO_PAGE); + MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), + Flags | AArch64II::MO_PAGEOFF | + AArch64II::MO_NC); + } + + transferImpOps(MI, MIB1, MIB2); + } MI.eraseFromParent(); return true; } Index: llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -686,6 +686,7 @@ .setMIFlags(MachineInstr::FrameSetup); switch (MF.getTarget().getCodeModel()) { + case CodeModel::Tiny: case CodeModel::Small: case CodeModel::Medium: case CodeModel::Kernel: Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h +++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h @@ -35,6 +35,7 @@ // offset of a variable into X0, using the TLSDesc model. 
TLSDESC_CALLSEQ, ADRP, // Page address of a TargetGlobalAddress operand. + ADR, // ADR ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand. LOADgot, // Load from automatically generated descriptor (e.g. Global // Offset Table, TLS record). @@ -587,6 +588,8 @@ SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const; template SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const; + template + SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const; SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1086,6 +1086,7 @@ case AArch64ISD::FIRST_NUMBER: break; case AArch64ISD::CALL: return "AArch64ISD::CALL"; case AArch64ISD::ADRP: return "AArch64ISD::ADRP"; + case AArch64ISD::ADR: return "AArch64ISD::ADR"; case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow"; case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot"; case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG"; @@ -3912,6 +3913,17 @@ return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo); } +// (adr sym) +template +SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG, + unsigned Flags) const { + LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n"); + SDLoc DL(N); + EVT Ty = getPointerTy(DAG.getDataLayout()); + SDValue Sym = getTargetNode(N, Ty, DAG, Flags); + return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym); +} + SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { GlobalAddressSDNode *GN = cast(Op); @@ -3926,7 +3938,8 @@ assert(cast(Op)->getOffset() == 0 && "unexpected offset in global node"); - // This also catches the large code model case for Darwin. + // This also catches the large code model case for Darwin, and tiny code + // model with got relocations. if ((OpFlags & AArch64II::MO_GOT) != 0) { return getGOT(GN, DAG, TargetFlags); } @@ -3934,6 +3947,8 @@ SDValue Result; if (getTargetMachine().getCodeModel() == CodeModel::Large) { Result = getAddrLarge(GN, DAG, TargetFlags); + } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) { + Result = getAddrTiny(GN, DAG, TargetFlags); } else { Result = getAddr(GN, DAG, TargetFlags); } @@ -4055,13 +4070,15 @@ AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { assert(Subtarget->isTargetELF() && "This function expects an ELF target"); - assert(Subtarget->useSmallAddressing() && - "ELF TLS only supported in small memory model"); + if (getTargetMachine().getCodeModel() == CodeModel::Large) + report_fatal_error("ELF TLS only supported in small memory model"); // Different choices can be made for the maximum size of the TLS area for a // module. For the small address model, the default TLS size is 16MiB and the // maximum TLS size is 4GiB. // FIXME: add -mtls-size command line option and make it control the 16MiB // vs. 4GiB code sequence generation. + // FIXME: add tiny codemodel support. We currently generate the same code as + // small, which may be larger than needed. 
const GlobalAddressSDNode *GA = cast(Op); TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); @@ -4779,6 +4796,8 @@ if (getTargetMachine().getCodeModel() == CodeModel::Large && !Subtarget->isTargetMachO()) { return getAddrLarge(JT, DAG); + } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) { + return getAddrTiny(JT, DAG); } return getAddr(JT, DAG); } @@ -4793,6 +4812,8 @@ return getGOT(CP, DAG); } return getAddrLarge(CP, DAG); + } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) { + return getAddrTiny(CP, DAG); } else { return getAddr(CP, DAG); } @@ -4804,9 +4825,10 @@ if (getTargetMachine().getCodeModel() == CodeModel::Large && !Subtarget->isTargetMachO()) { return getAddrLarge(BA, DAG); - } else { - return getAddr(BA, DAG); + } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) { + return getAddrTiny(BA, DAG); } + return getAddr(BA, DAG); } SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op, Index: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1632,6 +1632,9 @@ .addReg(Reg, RegState::Kill) .addImm(0) .addMemOperand(*MI.memoperands_begin()); + } else if (TM.getCodeModel() == CodeModel::Tiny) { + BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg) + .addGlobalAddress(GV, 0, OpFlags); } else { BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg) .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE); Index: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td +++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td @@ -176,6 +176,7 @@ // Node definitions. 
def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>; +def AArch64adr : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>; def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>; def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>; def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START", @@ -1385,7 +1386,8 @@ //===----------------------------------------------------------------------===// let isReMaterializable = 1 in { let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in { -def ADR : ADRI<0, "adr", adrlabel, []>; +def ADR : ADRI<0, "adr", adrlabel, + [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>; } // hasSideEffects = 0 def ADRP : ADRI<1, "adrp", adrplabel, @@ -1393,6 +1395,10 @@ } // isReMaterializable = 1 // page address of a constant pool entry, block address +def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>; +def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>; +def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>; +def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>; def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>; def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>; def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>; Index: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -983,6 +983,9 @@ materializeLargeCMVal(I, GV, OpFlags); I.eraseFromParent(); return true; + } else if (TM.getCodeModel() == CodeModel::Tiny) { + I.setDesc(TII.get(AArch64::ADR)); + I.getOperand(1).setTargetFlags(OpFlags); } else { I.setDesc(TII.get(AArch64::MOVaddr)); I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE); Index: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp @@ -204,7 +204,9 @@ // The small code model's direct accesses use ADRP, which cannot // necessarily produce the value 0 (if the code is above 4GB). - if (useSmallAddressing() && GV->hasExternalWeakLinkage()) + // Same for the tiny code model, where we have a pc relative LDR. 
+ if ((useSmallAddressing() || TM.getCodeModel() == CodeModel::Tiny) && + GV->hasExternalWeakLinkage()) return AArch64II::MO_GOT | Flags; return Flags; Index: llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp @@ -210,14 +210,16 @@ Optional CM, bool JIT) { if (CM) { - if (*CM != CodeModel::Small && *CM != CodeModel::Large) { + if (*CM != CodeModel::Small && *CM != CodeModel::Tiny && + *CM != CodeModel::Large) { if (!TT.isOSFuchsia()) report_fatal_error( - "Only small and large code models are allowed on AArch64"); - else if (CM != CodeModel::Kernel) - report_fatal_error( - "Only small, kernel, and large code models are allowed on AArch64"); - } + "Only small, tiny and large code models are allowed on AArch64"); + else if (*CM != CodeModel::Kernel) + report_fatal_error("Only small, tiny, kernel, and large code models " + "are allowed on AArch64"); + } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF()) + report_fatal_error("tiny code model is only supported on ELF"); return *CM; } // The default MCJIT memory managers make no guarantees about where they can Index: llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -2453,17 +2453,34 @@ SMLoc S = getLoc(); const MCExpr *Expr; - const AsmToken &Tok = getParser().getTok(); - if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { - if (getParser().parseExpression(Expr)) - return MatchOperand_ParseFail; + // Leave anything with a bracket to the default for SVE + if (getParser().getTok().is(AsmToken::LBrac)) + return MatchOperand_NoMatch; - SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); - Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); + if (getParser().getTok().is(AsmToken::Hash)) + getParser().Lex(); // Eat hash token. - return MatchOperand_Success; + if (parseSymbolicImmVal(Expr)) + return MatchOperand_ParseFail; + + AArch64MCExpr::VariantKind ELFRefKind; + MCSymbolRefExpr::VariantKind DarwinRefKind; + int64_t Addend; + if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { + if (DarwinRefKind == MCSymbolRefExpr::VK_None && + ELFRefKind == AArch64MCExpr::VK_INVALID) { + // No modifier was specified at all; this is the syntax for an ELF basic + // ADR relocation (unfortunately). + Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext()); + } else { + Error(S, "unexpected adr label"); + return MatchOperand_ParseFail; + } } - return MatchOperand_NoMatch; + + SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); + Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); + return MatchOperand_Success; } /// tryParseFPImm - A floating point immediate expression operand. 
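For reference, here is a minimal sketch, not part of the patch itself, of how an embedding client could ask for the new code model through the C++ API; it assumes the TargetRegistry/createTargetMachine interfaces as they exist around this revision, and the triple, CPU string, and option values are placeholder choices. This is the same path llc takes when -code-model=tiny is passed via CommandFlags.inc.

#include "llvm/Support/CodeGen.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <string>

llvm::TargetMachine *createTinyCodeModelTM() {
  // Tiny is only accepted for ELF AArch64 triples; anything else hits the
  // report_fatal_error added in AArch64TargetMachine.cpp above.
  const std::string TripleStr = "aarch64-none-linux-gnu";

  llvm::InitializeAllTargetInfos();
  llvm::InitializeAllTargets();
  llvm::InitializeAllTargetMCs();

  std::string Error;
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(TripleStr, Error);
  if (!T)
    return nullptr;

  // CodeModel::Tiny is the new enumerator from llvm/Support/CodeGen.h.
  return T->createTargetMachine(TripleStr, /*CPU=*/"generic", /*Features=*/"",
                                llvm::TargetOptions(),
                                /*RM=*/llvm::Reloc::Static,
                                /*CM=*/llvm::CodeModel::Tiny,
                                llvm::CodeGenOpt::Default);
}

Requesting Tiny on a non-ELF AArch64 triple (Mach-O, COFF) or on another target is rejected up front rather than silently falling back to Small.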
Index: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp +++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp @@ -10,6 +10,7 @@ #include "AArch64.h" #include "AArch64RegisterInfo.h" #include "MCTargetDesc/AArch64FixupKinds.h" +#include "MCTargetDesc/AArch64MCExpr.h" #include "llvm/ADT/Triple.h" #include "llvm/BinaryFormat/MachO.h" #include "llvm/MC/MCAsmBackend.h" @@ -376,6 +377,14 @@ // to the linker -- a relocation! if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21) return true; + + AArch64MCExpr::VariantKind RefKind = + static_cast(Target.getRefKind()); + AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind); + // LDR GOT relocations need a relocation + if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_ldr_pcrel_imm19 && + SymLoc == AArch64MCExpr::VK_GOT) + return true; return false; } Index: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp +++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp @@ -138,7 +138,9 @@ } else return ELF::R_AARCH64_PREL64; case AArch64::fixup_aarch64_pcrel_adr_imm21: - assert(SymLoc == AArch64MCExpr::VK_NONE && "unexpected ADR relocation"); + if (SymLoc != AArch64MCExpr::VK_ABS) + Ctx.reportError(Fixup.getLoc(), + "invalid symbol kind for ADR relocation"); return R_CLS(ADR_PREL_LO21); case AArch64::fixup_aarch64_pcrel_adrp_imm21: if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC) Index: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp +++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp @@ -62,8 +62,10 @@ case VK_TLSDESC_LO12: return ":tlsdesc_lo12:"; case VK_ABS_PAGE: return ""; case VK_ABS_PAGE_NC: return ":pg_hi21_nc:"; + case VK_GOT: return ":got:"; case VK_GOT_PAGE: return ":got:"; case VK_GOT_LO12: return ":got_lo12:"; + case VK_GOTTPREL: return ":gottprel:"; case VK_GOTTPREL_PAGE: return ":gottprel:"; case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:"; case VK_GOTTPREL_G1: return ":gottprel_g1:"; Index: llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp +++ llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp @@ -526,6 +526,8 @@ .setMIFlags(MachineInstr::FrameSetup); switch (TM.getCodeModel()) { + case CodeModel::Tiny: + llvm_unreachable("Tiny code model not available on ARM."); case CodeModel::Small: case CodeModel::Medium: case CodeModel::Kernel: Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp +++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp @@ -9207,6 +9207,8 @@ // IP. 
switch (TM.getCodeModel()) { + case CodeModel::Tiny: + llvm_unreachable("Tiny code model not available on ARM."); case CodeModel::Small: case CodeModel::Medium: case CodeModel::Kernel: Index: llvm/trunk/lib/Target/X86/X86Subtarget.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86Subtarget.cpp +++ llvm/trunk/lib/Target/X86/X86Subtarget.cpp @@ -77,6 +77,8 @@ if (isTargetELF()) { switch (TM.getCodeModel()) { // 64-bit small code model is simple: All rip-relative. + case CodeModel::Tiny: + llvm_unreachable("Tiny codesize model not supported on X86"); case CodeModel::Small: case CodeModel::Kernel: return X86II::MO_NO_FLAG; Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir @@ -0,0 +1,56 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64-none-eabi -code-model=tiny -run-pass=instruction-select -verify-machineinstrs -O0 %s -o - | FileCheck %s +--- | + target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" + + @foo1 = common global [1073741824 x i32] zeroinitializer, align 4 + @foo2 = common global [1073741824 x i32] zeroinitializer, align 4 + + define i32 @gv_tiny() { + entry: + %retval = alloca i32, align 4 + store i32 0, i32* %retval, align 4 + %0 = load i32, i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0), align 4 + %1 = load i32, i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0), align 4 + %add = add nsw i32 %0, %1 + ret i32 %add + } + +... +--- +name: gv_tiny +legalized: true +regBankSelected: true +stack: + - { id: 0, name: retval, type: default, offset: 0, size: 4, alignment: 4, + stack-id: 0, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', + debug-info-location: '' } +constants: +body: | + bb.1: + ; CHECK-LABEL: name: gv_tiny + ; CHECK: [[ADR:%[0-9]+]]:gpr64 = ADR @foo1 + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY [[ADR]] + ; CHECK: [[ADR1:%[0-9]+]]:gpr64 = ADR @foo2 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[ADR1]] + ; CHECK: STRWui $wzr, %stack.0.retval, 0 :: (store 4 into %ir.retval) + ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0)`) + ; CHECK: [[LDRWui1:%[0-9]+]]:gpr32 = LDRWui [[COPY1]], 0 :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0)`) + ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRWui]], [[LDRWui1]] + ; CHECK: $w0 = COPY [[ADDWrr]] + ; CHECK: RET_ReallyLR implicit $w0 + %1:gpr(s32) = G_CONSTANT i32 0 + %4:gpr(p0) = G_GLOBAL_VALUE @foo1 + %3:gpr(p0) = COPY %4(p0) + %7:gpr(p0) = G_GLOBAL_VALUE @foo2 + %6:gpr(p0) = COPY %7(p0) + %0:gpr(p0) = G_FRAME_INDEX %stack.0.retval + G_STORE %1(s32), %0(p0) :: (store 4 into %ir.retval) + %2:gpr(s32) = G_LOAD %3(p0) :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0)`) + %5:gpr(s32) = G_LOAD %6(p0) :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0)`) + %8:gpr(s32) = G_ADD %2, %5 + $w0 = COPY %8(s32) + RET_ReallyLR implicit $w0 + +... 
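The same knob is exposed through the stable C API (and from there the Go bindings updated at the top of this patch). Below is a hedged sketch using only existing llvm-c entry points plus the LLVMCodeModelTiny enumerator added here; it assumes an LLVM build with the AArch64 target enabled, and the module, triple, and output file name are placeholders.

#include "llvm-c/Core.h"
#include "llvm-c/Target.h"
#include "llvm-c/TargetMachine.h"
#include <stdio.h>

int emitObjectWithTinyCM(LLVMModuleRef M) {
  LLVMInitializeAArch64TargetInfo();
  LLVMInitializeAArch64Target();
  LLVMInitializeAArch64TargetMC();
  LLVMInitializeAArch64AsmPrinter();

  const char *Triple = "aarch64-none-linux-gnu";
  char *Error = NULL;
  LLVMTargetRef Target;
  if (LLVMGetTargetFromTriple(Triple, &Target, &Error)) {
    fprintf(stderr, "%s\n", Error);
    LLVMDisposeMessage(Error);
    return 1;
  }

  // LLVMCodeModelTiny is mapped to CodeModel::Tiny by unwrap() in
  // CodeGenCWrappers.h; builds without this patch have no such value.
  LLVMTargetMachineRef TM = LLVMCreateTargetMachine(
      Target, Triple, "generic", "", LLVMCodeGenLevelDefault, LLVMRelocStatic,
      LLVMCodeModelTiny);

  int Failed = LLVMTargetMachineEmitToFile(TM, M, (char *)"out.o",
                                           LLVMObjectFile, &Error);
  if (Failed) {
    fprintf(stderr, "%s\n", Error);
    LLVMDisposeMessage(Error);
  }
  LLVMDisposeTargetMachine(TM);
  return Failed;
}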
Index: llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll @@ -2,6 +2,12 @@ ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -aarch64-elf-ldtls-generation=1 -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOLD %s ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-NOLD-RELOC %s +; FIXME: We currently produce "small" code for the tiny model +; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -aarch64-elf-ldtls-generation=1 -code-model=tiny -verify-machineinstrs < %s | FileCheck %s +; FIXME: We currently error for the large code model +; RUN: not llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -aarch64-elf-ldtls-generation=1 -code-model=large -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LARGE + +; CHECK-LARGE: ELF TLS only supported in small memory model @general_dynamic_var = external thread_local global i32 Index: llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll @@ -1,5 +1,11 @@ ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -show-mc-encoding < %s | FileCheck %s ; RUN: llc -mtriple=arm64-none-linux-gnu -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s +; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -show-mc-encoding -code-model=tiny < %s | FileCheck %s --check-prefix=CHECK-TINY +; RUN: llc -mtriple=arm64-none-linux-gnu -filetype=obj < %s -code-model=tiny | llvm-objdump -r - | FileCheck --check-prefix=CHECK-TINY-RELOC %s +; FIXME: We currently error for the large code model +; RUN: not llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -show-mc-encoding -code-model=large < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LARGE + +; CHECK-LARGE: ELF TLS only supported in small memory model @initial_exec_var = external thread_local(initialexec) global i32 @@ -15,6 +21,12 @@ ; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC +; CHECK-TINY: ldr x[[TP_OFFSET:[0-9]+]], :gottprel:initial_exec_var +; CHECK-TINY: mrs x[[TP:[0-9]+]], TPIDR_EL0 +; CHECK-TINY: ldr w0, [x[[TP]], x[[TP_OFFSET]]] + +; CHECK-TINY-RELOC: R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 + ret i32 %val } @@ -30,6 +42,12 @@ ; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC +; CHECK-TINY: ldr x[[TP_OFFSET:[0-9]+]], :gottprel:initial_exec_var +; CHECK-TINY: mrs [[TP:x[0-9]+]], TPIDR_EL0 +; CHECK-TINY: add x0, [[TP]], x[[TP_OFFSET]] + +; CHECK-TINY-RELOC: R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 + } @local_exec_var = thread_local(localexec) global i32 0 @@ -45,6 +63,14 @@ ; CHECK-RELOC: R_AARCH64_TLSLE_ADD_TPREL_HI12 ; CHECK-RELOC: R_AARCH64_TLSLE_ADD_TPREL_LO12_NC + +; CHECK-TINY: mrs x[[R1:[0-9]+]], TPIDR_EL0 +; CHECK-TINY: add x[[R2:[0-9]+]], x[[R1]], :tprel_hi12:local_exec_var +; CHECK-TINY: add x[[R3:[0-9]+]], x[[R2]], :tprel_lo12_nc:local_exec_var +; CHECK-TINY: ldr w0, [x[[R3]]] + +; CHECK-TINY-RELOC: 
R_AARCH64_TLSLE_ADD_TPREL_HI12 +; CHECK-TINY-RELOC: R_AARCH64_TLSLE_ADD_TPREL_LO12_NC ret i32 %val } @@ -59,4 +85,12 @@ ; CHECK-RELOC: R_AARCH64_TLSLE_ADD_TPREL_HI12 ; CHECK-RELOC: R_AARCH64_TLSLE_ADD_TPREL_LO12_NC + +; CHECK-TINY: mrs x[[R1:[0-9]+]], TPIDR_EL0 +; CHECK-TINY: add x[[R2:[0-9]+]], x[[R1]], :tprel_hi12:local_exec_var +; CHECK-TINY: add x0, x[[R2]], :tprel_lo12_nc:local_exec_var +; CHECK-TINY: ret + +; CHECK-TINY-RELOC: R_AARCH64_TLSLE_ADD_TPREL_HI12 +; CHECK-TINY-RELOC: R_AARCH64_TLSLE_ADD_TPREL_LO12_NC } Index: llvm/trunk/test/CodeGen/AArch64/blockaddress.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/blockaddress.ll +++ llvm/trunk/test/CodeGen/AArch64/blockaddress.ll @@ -1,5 +1,6 @@ ; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -code-model=tiny -mtriple=aarch64-none-none-eabi -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-TINY %s @addr = global i8* null @@ -22,6 +23,11 @@ ; CHECK-LARGE: ldr [[NEWDEST:x[0-9]+]] ; CHECK-LARGE: br [[NEWDEST]] +; CHECK-TINY: adr [[DEST:x[0-9]+]], {{.Ltmp[0-9]+}} +; CHECK-TINY: str [[DEST]], +; CHECK-TINY: ldr [[NEWDEST:x[0-9]+]] +; CHECK-TINY: br [[NEWDEST]] + block: ret void } Index: llvm/trunk/test/CodeGen/AArch64/code-model-tiny-abs.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/code-model-tiny-abs.ll +++ llvm/trunk/test/CodeGen/AArch64/code-model-tiny-abs.ll @@ -0,0 +1,54 @@ +; RUN: llc -mtriple=aarch64-none-eabi -code-model=tiny < %s | FileCheck %s + +@var8 = global i8 0 +@var16 = global i16 0 +@var32 = global i32 0 +@var64 = global i64 0 + +define i8* @global_addr() { +; CHECK-LABEL: global_addr: + ret i8* @var8 + ; The adr calculation should end up returned directly in x0. 
+; CHECK: adr x0, var8 +; CHECK-NEXT: ret +} + +define i8 @global_i8() { +; CHECK-LABEL: global_i8: + %val = load i8, i8* @var8 + ret i8 %val +; CHECK: adr x[[ADDR_REG:[0-9]+]], var8 +; CHECK: ldrb w0, [x[[ADDR_REG]]] +} + +define i16 @global_i16() { +; CHECK-LABEL: global_i16: + %val = load i16, i16* @var16 + ret i16 %val +; CHECK: adr x[[ADDR_REG:[0-9]+]], var16 +; CHECK: ldrh w0, [x[[ADDR_REG]]] +} + +define i32 @global_i32() { +; CHECK-LABEL: global_i32: + %val = load i32, i32* @var32 + ret i32 %val +; CHECK: adr x[[ADDR_REG:[0-9]+]], var32 +; CHECK: ldr w0, [x[[ADDR_REG]]] +} + +define i64 @global_i64() { +; CHECK-LABEL: global_i64: + %val = load i64, i64* @var64 + ret i64 %val +; CHECK: adr x[[ADDR_REG:[0-9]+]], var64 +; CHECK: ldr x0, [x[[ADDR_REG]]] +} + +define <2 x i64> @constpool() { +; CHECK-LABEL: constpool: + ret <2 x i64> + +; CHECK: adr x[[ADDR_REG:[0-9]+]], {{.LCPI[0-9]+_[0-9]+}} +; CHECK: ldr q0, [x[[ADDR_REG]]] +} Index: llvm/trunk/test/CodeGen/AArch64/extern-weak.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/extern-weak.ll +++ llvm/trunk/test/CodeGen/AArch64/extern-weak.ll @@ -1,6 +1,7 @@ ; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s ; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck %s ; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - %s | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -mtriple=aarch64-none-none-eabi -code-model=tiny -o - %s | FileCheck --check-prefix=CHECK-TINY %s declare extern_weak i32 @var() @@ -20,6 +21,9 @@ ; CHECK-LARGE: movk x0, #:abs_g1_nc:var ; CHECK-LARGE: movk x0, #:abs_g2_nc:var ; CHECK-LARGE: movk x0, #:abs_g3:var + + ; In the tiny code model we use a GOT-relocated LDR.
+; CHECK-TINY: ldr x0, :got:var } @@ -41,6 +45,9 @@ ; CHECK-LARGE: movk [[ADDR]], #:abs_g1_nc:arr_var ; CHECK-LARGE: movk [[ADDR]], #:abs_g2_nc:arr_var ; CHECK-LARGE: movk [[ADDR]], #:abs_g3:arr_var + +; CHECK-TINY: ldr [[BASE:x[0-9]+]], :got:arr_var +; CHECK-TINY: add x0, [[BASE]], #20 } @defined_weak_var = internal unnamed_addr global i32 0 @@ -55,4 +62,6 @@ ; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var ; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var ; CHECK-LARGE: movk x0, #:abs_g3:defined_weak_var + +; CHECK-TINY: adr x0, defined_weak_var } Index: llvm/trunk/test/CodeGen/AArch64/fpimm.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/fpimm.ll +++ llvm/trunk/test/CodeGen/AArch64/fpimm.ll @@ -1,39 +1,48 @@ ; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=aarch64-apple-darwin -code-model=large -verify-machineinstrs < %s | FileCheck %s --check-prefix=LARGE ; RUN: llc -mtriple=aarch64-apple-darwin -code-model=large -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s --check-prefix=LARGE +; RUN: llc -mtriple=aarch64-none-eabi -code-model=tiny -verify-machineinstrs < %s | FileCheck %s --check-prefix=TINY @varf32 = global float 0.0 @varf64 = global double 0.0 define void @check_float() { ; CHECK-LABEL: check_float: +; TINY-LABEL: check_float: %val = load float, float* @varf32 %newval1 = fadd float %val, 8.5 store volatile float %newval1, float* @varf32 -; CHECK-DAG: fmov [[EIGHT5:s[0-9]+]], #8.5 +; CHECK-DAG: fmov {{s[0-9]+}}, #8.5 +; TINY-DAG: fmov {{s[0-9]+}}, #8.5 %newval2 = fadd float %val, 128.0 store volatile float %newval2, float* @varf32 -; CHECK-DAG: ldr [[HARD:s[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI0_0 +; CHECK-DAG: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI0_0 +; TINY-DAG: ldr {{s[0-9]+}}, [{{x[0-9]+}}] ; CHECK: ret +; TINY: ret ret void } define void @check_double() { ; CHECK-LABEL: check_double: +; TINY-LABEL: check_double: %val = load double, double* @varf64 %newval1 = fadd double %val, 8.5 store volatile double %newval1, double* @varf64 ; CHECK-DAG: fmov {{d[0-9]+}}, #8.5 +; TINY-DAG: fmov {{d[0-9]+}}, #8.5 %newval2 = fadd double %val, 128.0 store volatile double %newval2, double* @varf64 ; CHECK-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:.LCPI1_0 +; TINY-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}] ; CHECK: ret +; TINY: ret ret void } @@ -41,6 +50,9 @@ ; LARGE: mov [[REG:w[0-9]+]], #4059 ; LARGE-NEXT: movk [[REG]], #16457, lsl #16 ; LARGE-NEXT: fmov s0, [[REG]] +; TINY-LABEL: check_float2 +; TINY: adr x[[REG:[0-9]+]], .LCPI2_0 +; TINY-NEXT: ldr s0, [x[[REG]]] define float @check_float2() { ret float 3.14159274101257324218750 } @@ -51,6 +63,9 @@ ; LARGE-NEXT: movk [[REG]], #8699, lsl #32 ; LARGE-NEXT: movk [[REG]], #16393, lsl #48 ; LARGE-NEXT: fmov d0, [[REG]] +; TINY-LABEL: check_double2 +; TINY: adr x[[REG:[0-9]+]], .LCPI3_0 +; TINY-NEXT: ldr d0, [x[[REG]]] define double @check_double2() { ret double 3.1415926535897931159979634685441851615905761718750 } Index: llvm/trunk/test/CodeGen/AArch64/jump-table.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/jump-table.ll +++ llvm/trunk/test/CodeGen/AArch64/jump-table.ll @@ -1,6 +1,7 @@ ; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | 
FileCheck --check-prefix=CHECK-LARGE %s ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-enable-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s +; RUN: llc -code-model=tiny -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-TINY %s define i32 @test_jumptable(i32 %in) { ; CHECK: test_jumptable @@ -29,6 +30,10 @@ ; CHECK-PIC: add [[TABLE:x[0-9]+]], [[DEST]], x[[JT]] ; CHECK-PIC: br [[TABLE]] +; CHECK-TINY: adr x[[JT:[0-9]+]], .LJTI0_0 +; CHECK-TINY: ldr [[DEST:x[0-9]+]], [x[[JT]], {{x[0-9]+}}, lsl #3] +; CHECK-TINY: br [[DEST]] + def: ret i32 0 Index: llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll +++ llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll @@ -1,7 +1,9 @@ ; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -mcpu=cyclone | FileCheck %s ; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -code-model=large -mcpu=cyclone | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-none-eabi -code-model=tiny -mcpu=cyclone | FileCheck --check-prefix=CHECK-TINY %s ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-none-eabi -code-model=tiny -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-TINY %s @varfloat = global float 0.0 @vardouble = global double 0.0 @@ -15,6 +17,10 @@ ; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]] ; CHECK-NOFP-NOT: ldr {{s[0-9]+}}, +; CHECK-TINY: adr x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]] +; CHECK-TINY: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]]] +; CHECK-NOFP-TINY-NOT: ldr {{s[0-9]+}}, + ; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g0_nc:[[CURLIT:.LCPI[0-9]+_[0-9]+]] ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]] ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]] @@ -33,6 +39,11 @@ ; CHECK-NOFP-NOT: ldr {{d[0-9]+}}, ; CHECK-NOFP-NOT: fadd +; CHECK-TINY: adr x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]] +; CHECK-TINY: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]]] +; CHECK-NOFP-TINY-NOT: ldr {{d[0-9]+}}, +; CHECK-NOFP-TINY-NOT: fadd + ; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g0_nc:[[CURLIT:.LCPI[0-9]+_[0-9]+]] ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]] ; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]] Index: llvm/trunk/test/CodeGen/AArch64/tiny_model.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/tiny_model.ll +++ llvm/trunk/test/CodeGen/AArch64/tiny_model.ll @@ -0,0 +1,421 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-linux-gnu -code-model=tiny < %s | FileCheck %s +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-linux-gnu -code-model=tiny -fast-isel < %s | FileCheck %s +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-linux-gnu -code-model=tiny -global-isel < %s | FileCheck %s --check-prefix=CHECK-GLOBISEL +; RUN: llc -verify-machineinstrs -o - 
-mtriple=aarch64-none-linux-gnu -code-model=tiny -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK-PIC +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-linux-gnu -code-model=tiny -relocation-model=pic -fast-isel < %s | FileCheck %s --check-prefix=CHECK-PIC +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-linux-gnu -code-model=tiny -relocation-model=pic -global-isel < %s | FileCheck %s --check-prefix=CHECK-PIC-GLOBISEL + +; Note fast-isel tests here will fall back to isel + +@src = external local_unnamed_addr global [65536 x i8], align 1 +@dst = external global [65536 x i8], align 1 +@ptr = external local_unnamed_addr global i8*, align 8 + +define void @foo1() { +; CHECK-LABEL: foo1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, src +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: adr x9, dst +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: foo1: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, src +; CHECK-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-GLOBISEL-NEXT: adr x9, dst +; CHECK-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: foo1: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: ldr x8, :got:src +; CHECK-PIC-NEXT: ldrb w8, [x8] +; CHECK-PIC-NEXT: ldr x9, :got:dst +; CHECK-PIC-NEXT: strb w8, [x9] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: foo1: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: ldr x8, :got:src +; CHECK-PIC-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-PIC-GLOBISEL-NEXT: ldr x9, :got:dst +; CHECK-PIC-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @src, i64 0, i64 0), align 1 + store i8 %0, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @dst, i64 0, i64 0), align 1 + ret void +} + +define void @foo2() { +; CHECK-LABEL: foo2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, ptr +; CHECK-NEXT: adr x9, dst +; CHECK-NEXT: str x9, [x8] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: foo2: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, dst +; CHECK-GLOBISEL-NEXT: adr x9, ptr +; CHECK-GLOBISEL-NEXT: str x8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: foo2: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: ldr x8, :got:ptr +; CHECK-PIC-NEXT: ldr x9, :got:dst +; CHECK-PIC-NEXT: str x9, [x8] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: foo2: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: ldr x8, :got:dst +; CHECK-PIC-GLOBISEL-NEXT: ldr x9, :got:ptr +; CHECK-PIC-GLOBISEL-NEXT: str x8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + store i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @dst, i64 0, i64 0), i8** @ptr, align 8 + ret void +} + +define void @foo3() { +; FIXME: Needn't adr ptr +; +; CHECK-LABEL: foo3: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, src +; CHECK-NEXT: adr x9, ptr +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: ldr x9, [x9] +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: foo3: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, src +; CHECK-GLOBISEL-NEXT: adr x9, ptr +; CHECK-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-GLOBISEL-NEXT: ldr x9, [x9] +; CHECK-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: foo3: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: ldr x8, :got:src +; CHECK-PIC-NEXT: ldr x9, :got:ptr +; CHECK-PIC-NEXT: ldrb w8, [x8] +; 
CHECK-PIC-NEXT: ldr x9, [x9] +; CHECK-PIC-NEXT: strb w8, [x9] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: foo3: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: ldr x8, :got:src +; CHECK-PIC-GLOBISEL-NEXT: ldr x9, :got:ptr +; CHECK-PIC-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-PIC-GLOBISEL-NEXT: ldr x9, [x9] +; CHECK-PIC-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @src, i64 0, i64 0), align 1 + %1 = load i8*, i8** @ptr, align 8 + store i8 %0, i8* %1, align 1 + ret void +} + +@lsrc = internal global i8 0, align 4 +@ldst = internal global i8 0, align 4 +@lptr = internal global i8* null, align 8 + +define void @bar1() { +; CHECK-LABEL: bar1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, lsrc +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: adr x9, ldst +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: bar1: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, lsrc +; CHECK-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-GLOBISEL-NEXT: adr x9, ldst +; CHECK-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: bar1: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: adr x8, lsrc +; CHECK-PIC-NEXT: ldrb w8, [x8] +; CHECK-PIC-NEXT: adr x9, ldst +; CHECK-PIC-NEXT: strb w8, [x9] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: bar1: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: adr x8, lsrc +; CHECK-PIC-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-PIC-GLOBISEL-NEXT: adr x9, ldst +; CHECK-PIC-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + %0 = load i8, i8* @lsrc, align 4 + store i8 %0, i8* @ldst, align 4 + ret void +} + +define void @bar2() { +; CHECK-LABEL: bar2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, lptr +; CHECK-NEXT: adr x9, ldst +; CHECK-NEXT: str x9, [x8] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: bar2: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, ldst +; CHECK-GLOBISEL-NEXT: adr x9, lptr +; CHECK-GLOBISEL-NEXT: str x8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: bar2: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: adr x8, lptr +; CHECK-PIC-NEXT: adr x9, ldst +; CHECK-PIC-NEXT: str x9, [x8] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: bar2: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: adr x8, ldst +; CHECK-PIC-GLOBISEL-NEXT: adr x9, lptr +; CHECK-PIC-GLOBISEL-NEXT: str x8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + store i8* @ldst, i8** @lptr, align 8 + ret void +} + +define void @bar3() { +; FIXME: Needn't adr lptr +; +; CHECK-LABEL: bar3: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, lsrc +; CHECK-NEXT: adr x9, lptr +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: ldr x9, [x9] +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: bar3: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, lsrc +; CHECK-GLOBISEL-NEXT: adr x9, lptr +; CHECK-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-GLOBISEL-NEXT: ldr x9, [x9] +; CHECK-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: bar3: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: adr x8, lsrc +; CHECK-PIC-NEXT: adr x9, lptr +; CHECK-PIC-NEXT: ldrb w8, [x8] +; CHECK-PIC-NEXT: ldr x9, [x9] +; CHECK-PIC-NEXT: strb w8, [x9] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: bar3: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; 
CHECK-PIC-GLOBISEL-NEXT: adr x8, lsrc +; CHECK-PIC-GLOBISEL-NEXT: adr x9, lptr +; CHECK-PIC-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-PIC-GLOBISEL-NEXT: ldr x9, [x9] +; CHECK-PIC-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + %0 = load i8, i8* @lsrc, align 4 + %1 = load i8*, i8** @lptr, align 8 + store i8 %0, i8* %1, align 1 + ret void +} + + +@lbsrc = internal global [65536 x i8] zeroinitializer, align 4 +@lbdst = internal global [65536 x i8] zeroinitializer, align 4 + +define void @baz1() { +; CHECK-LABEL: baz1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, lbsrc +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: adr x9, lbdst +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: baz1: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, lbsrc +; CHECK-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-GLOBISEL-NEXT: adr x9, lbdst +; CHECK-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: baz1: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: adr x8, lbsrc +; CHECK-PIC-NEXT: ldrb w8, [x8] +; CHECK-PIC-NEXT: adr x9, lbdst +; CHECK-PIC-NEXT: strb w8, [x9] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: baz1: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: adr x8, lbsrc +; CHECK-PIC-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-PIC-GLOBISEL-NEXT: adr x9, lbdst +; CHECK-PIC-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbsrc, i64 0, i64 0), align 4 + store i8 %0, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbdst, i64 0, i64 0), align 4 + ret void +} + +define void @baz2() { +; CHECK-LABEL: baz2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, lptr +; CHECK-NEXT: adr x9, lbdst +; CHECK-NEXT: str x9, [x8] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: baz2: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, lbdst +; CHECK-GLOBISEL-NEXT: adr x9, lptr +; CHECK-GLOBISEL-NEXT: str x8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: baz2: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: adr x8, lptr +; CHECK-PIC-NEXT: adr x9, lbdst +; CHECK-PIC-NEXT: str x9, [x8] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: baz2: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: adr x8, lbdst +; CHECK-PIC-GLOBISEL-NEXT: adr x9, lptr +; CHECK-PIC-GLOBISEL-NEXT: str x8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + store i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbdst, i64 0, i64 0), i8** @lptr, align 8 + ret void +} + +define void @baz3() { +; FIXME: Needn't adr lptr +; +; CHECK-LABEL: baz3: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x8, lbsrc +; CHECK-NEXT: adr x9, lptr +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: ldr x9, [x9] +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: baz3: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x8, lbsrc +; CHECK-GLOBISEL-NEXT: adr x9, lptr +; CHECK-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-GLOBISEL-NEXT: ldr x9, [x9] +; CHECK-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: baz3: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: adr x8, lbsrc +; CHECK-PIC-NEXT: adr x9, lptr +; CHECK-PIC-NEXT: ldrb w8, [x8] +; CHECK-PIC-NEXT: ldr x9, [x9] +; CHECK-PIC-NEXT: strb w8, [x9] +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: baz3: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: adr x8, 
lbsrc +; CHECK-PIC-GLOBISEL-NEXT: adr x9, lptr +; CHECK-PIC-GLOBISEL-NEXT: ldrb w8, [x8] +; CHECK-PIC-GLOBISEL-NEXT: ldr x9, [x9] +; CHECK-PIC-GLOBISEL-NEXT: strb w8, [x9] +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbsrc, i64 0, i64 0), align 4 + %1 = load i8*, i8** @lptr, align 8 + store i8 %0, i8* %1, align 1 + ret void +} + + +declare void @func(...) + +define i8* @externfuncaddr() { +; CHECK-LABEL: externfuncaddr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x0, func +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: externfuncaddr: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x0, func +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: externfuncaddr: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: ldr x0, :got:func +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: externfuncaddr: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: ldr x0, :got:func +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + ret i8* bitcast (void (...)* @func to i8*) +} + +define i8* @localfuncaddr() { +; CHECK-LABEL: localfuncaddr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: adr x0, externfuncaddr +; CHECK-NEXT: ret +; +; CHECK-GLOBISEL-LABEL: localfuncaddr: +; CHECK-GLOBISEL: // %bb.0: // %entry +; CHECK-GLOBISEL-NEXT: adr x0, externfuncaddr +; CHECK-GLOBISEL-NEXT: ret +; +; CHECK-PIC-LABEL: localfuncaddr: +; CHECK-PIC: // %bb.0: // %entry +; CHECK-PIC-NEXT: ldr x0, :got:externfuncaddr +; CHECK-PIC-NEXT: ret +; +; CHECK-PIC-GLOBISEL-LABEL: localfuncaddr: +; CHECK-PIC-GLOBISEL: // %bb.0: // %entry +; CHECK-PIC-GLOBISEL-NEXT: ldr x0, :got:externfuncaddr +; CHECK-PIC-GLOBISEL-NEXT: ret +entry: + ret i8* bitcast (i8* ()* @externfuncaddr to i8*) +} + Index: llvm/trunk/test/CodeGen/AArch64/tiny_supported.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/tiny_supported.ll +++ llvm/trunk/test/CodeGen/AArch64/tiny_supported.ll @@ -0,0 +1,13 @@ +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-linux-gnu -code-model=tiny < %s 2>&1 | FileCheck %s +; RUN: llc -verify-machineinstrs -o - -mtriple=aarch64-none-eabi -code-model=tiny < %s 2>&1 | FileCheck %s +; RUN: not llc -verify-machineinstrs -o - -mtriple=arm64-apple-darwin -code-model=tiny < %s 2>&1 | FileCheck %s --check-prefix=NOTINY +; RUN: not llc -verify-machineinstrs -o - -mtriple=arm64-apple-ios -code-model=tiny < %s 2>&1 | FileCheck %s --check-prefix=NOTINY +; RUN: not llc -verify-machineinstrs -o - -mtriple=aarch64-unknown-windows-msvc -code-model=tiny < %s 2>&1 | FileCheck %s --check-prefix=NOTINY + +; CHECK-NOT: tiny code model is only supported on ELF +; CHECK-LABEL: foo +; NOTINY: tiny code model is only supported on ELF + +define void @foo() { + ret void +} Index: llvm/trunk/test/MC/AArch64/basic-a64-diagnostics.s =================================================================== --- llvm/trunk/test/MC/AArch64/basic-a64-diagnostics.s +++ llvm/trunk/test/MC/AArch64/basic-a64-diagnostics.s @@ -3315,17 +3315,25 @@ //------------------------------------------------------------------------------ adr sp, loc // expects xzr + adr x0, :got:loc // bad relocation type adrp x3, #20 // Immediate unaligned adrp w2, loc // 64-bit register needed + adrp x5, :got_lo12:loc // bad relocation type // CHECK-ERROR: error: invalid operand for instruction // CHECK-ERROR-NEXT: adr sp, loc // CHECK-ERROR-NEXT: ^ +// CHECK-ERROR-NEXT: error: unexpected adr label +// CHECK-ERROR-NEXT: adr x0, :got:loc +// 
CHECK-ERROR-NEXT: ^ // CHECK-ERROR-NEXT: error: expected label or encodable integer pc offset // CHECK-ERROR-NEXT: adrp x3, #20 // CHECK-ERROR-NEXT: ^ // CHECK-ERROR-NEXT: error: invalid operand for instruction // CHECK-ERROR-NEXT: adrp w2, loc // CHECK-ERROR-NEXT: ^ +// CHECK-ERROR-NEXT: error: page or gotpage label reference expected +// CHECK-ERROR-NEXT: adrp x5, :got_lo12:loc +// CHECK-ERROR-NEXT: ^ adr x9, #1048576 adr x2, #-1048577