diff --git a/llvm/include/llvm/MC/MCExpr.h b/llvm/include/llvm/MC/MCExpr.h --- a/llvm/include/llvm/MC/MCExpr.h +++ b/llvm/include/llvm/MC/MCExpr.h @@ -299,6 +299,7 @@ VK_PPC_TLSGD, // symbol@tlsgd VK_PPC_AIX_TLSGD, // symbol@gd VK_PPC_AIX_TLSGDM, // symbol@m + VK_PPC_AIX_TLSIE, // symbol@ie VK_PPC_AIX_TLSLE, // symbol@le VK_PPC_GOT_TLSLD, // symbol@got@tlsld VK_PPC_GOT_TLSLD_LO, // symbol@got@tlsld@l diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp --- a/llvm/lib/MC/MCExpr.cpp +++ b/llvm/lib/MC/MCExpr.cpp @@ -327,6 +327,8 @@ return "gd"; case VK_PPC_AIX_TLSGDM: return "m"; + case VK_PPC_AIX_TLSIE: + return "ie"; case VK_PPC_AIX_TLSLE: return "le"; case VK_PPC_GOT_TLSLD: return "got@tlsld"; diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp --- a/llvm/lib/MC/XCOFFObjectWriter.cpp +++ b/llvm/lib/MC/XCOFFObjectWriter.cpp @@ -697,7 +697,8 @@ const uint32_t Index = getIndex(SymA, SymASec); if (Type == XCOFF::RelocationType::R_POS || Type == XCOFF::RelocationType::R_TLS || - Type == XCOFF::RelocationType::R_TLS_LE) + Type == XCOFF::RelocationType::R_TLS_LE || + Type == XCOFF::RelocationType::R_TLS_IE) // The FixedValue should be symbol's virtual address in this object file // plus any constant value that we might get. FixedValue = getVirtualAddress(SymA, SymASec) + Target.getConstant(); diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -148,9 +148,10 @@ cast<MCSectionXCOFF>(Streamer.getCurrentSectionOnly()) ->getQualNameSymbol(); // On AIX, we have a region handle (symbol@m) and the variable offset - // (symbol@{gd|le}) for TLS variables, depending on the TLS model. + // (symbol@{gd|ie|le}) for TLS variables, depending on the TLS model. if (Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSGD || Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSGDM || + Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSIE || Kind == MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSLE) OS << "\t.tc " << TCSym->getName() << "," << XSym->getName() << "@" << MCSymbolRefExpr::getVariantKindName(Kind) << '\n'; diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCXCOFFObjectWriter.cpp @@ -108,6 +108,8 @@ return {XCOFF::RelocationType::R_TLS, SignAndSizeForFKData}; case MCSymbolRefExpr::VK_PPC_AIX_TLSGDM: return {XCOFF::RelocationType::R_TLSM, SignAndSizeForFKData}; + case MCSymbolRefExpr::VK_PPC_AIX_TLSIE: + return {XCOFF::RelocationType::R_TLS_IE, SignAndSizeForFKData}; case MCSymbolRefExpr::VK_PPC_AIX_TLSLE: return {XCOFF::RelocationType::R_TLS_LE, SignAndSizeForFKData}; case MCSymbolRefExpr::VK_None: diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp --- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -827,16 +827,16 @@ return Expr; }; auto GetVKForMO = [&](const MachineOperand &MO) { - // For TLS local-exec accesses on AIX, we have one TOC entry for the symbol - // (with the variable offset), which is differentiated by MO_TPREL_FLAG. 
+ // For TLS initial-exec and local-exec accesses on AIX, we have one TOC + // entry for the symbol (with the variable offset), which is differentiated + // by MO_TPREL_FLAG. if (MO.getTargetFlags() & PPCII::MO_TPREL_FLAG) { - // TODO: Update the query and the comment above to add a check for initial - // exec when this TLS model is supported on AIX in the future, as both - // local-exec and initial-exec can use MO_TPREL_FLAG. assert(MO.isGlobal() && "Only expecting a global MachineOperand here!\n"); TLSModel::Model Model = TM.getTLSModel(MO.getGlobal()); if (Model == TLSModel::LocalExec) return MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSLE; + if (Model == TLSModel::InitialExec) + return MCSymbolRefExpr::VariantKind::VK_PPC_AIX_TLSIE; llvm_unreachable("Only expecting local-exec accesses!"); } // For GD TLS access on AIX, we have two TOC entries for the symbol (one for diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -3326,7 +3326,7 @@ bool Is64Bit = Subtarget.isPPC64(); TLSModel::Model Model = getTargetMachine().getTLSModel(GV); - if (Model == TLSModel::LocalExec) { + if (Model == TLSModel::LocalExec || Model == TLSModel::InitialExec) { SDValue VariableOffsetTGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TPREL_FLAG); SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA); @@ -3351,9 +3351,9 @@ return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, VariableOffset); } - // The Local-Exec and General-Dynamic TLS models are currently the only - // supported access models. If Local-exec is not possible or specified, all - // GlobalTLSAddress nodes are lowered using the general-dynamic model. + // Only Local-Exec, Initial-Exec and General-Dynamic TLS models are currently + // supported models. If Local- or Initial-exec are not possible or specified, + // all GlobalTLSAddress nodes are lowered using the general-dynamic model. // We need to generate two TOC entries, one for the variable offset, one for // the region handle. 
The global address for the TOC entry of the region // handle is created with the MO_TLSGDM_FLAG flag and the global address diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-ie-ldst.ll b/llvm/test/CodeGen/PowerPC/aix-tls-ie-ldst.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/aix-tls-ie-ldst.ll @@ -0,0 +1,1559 @@ +; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \ +; RUN: -mtriple powerpc64-ibm-aix-xcoff -code-model=large < %s | \ +; RUN: FileCheck %s -check-prefix=LARGE64 +; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \ +; RUN: -mtriple powerpc64-ibm-aix-xcoff -code-model=small < %s | \ +; RUN: FileCheck %s -check-prefix=SMALL64 +; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \ +; RUN: -mtriple powerpc-ibm-aix-xcoff -code-model=large < %s | \ +; RUN: FileCheck %s -check-prefix=LARGE32 +; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \ +; RUN: -mtriple powerpc-ibm-aix-xcoff -code-model=small < %s | \ +; RUN: FileCheck %s -check-prefix=SMALL32 + +@global_int_zero = thread_local(initialexec) global i32 0, align 4 +@global_int_nonzero = thread_local(initialexec) global i32 1, align 4 +@intern_int_zero = internal thread_local(initialexec) global i32 0, align 4 +@intern_int_nonzero = internal thread_local(initialexec) global i32 1, align 4 + +@global_long_zero = thread_local(initialexec) global i64 0, align 8 +@global_long_nonzero = thread_local(initialexec) global i64 1, align 8 +@intern_long_zero = internal thread_local(initialexec) global i64 0, align 8 +@intern_long_nonzero = internal thread_local(initialexec) global i64 1, align 8 + +@global_float_zero = thread_local(initialexec) global float 0.000000, align 4 +@global_float_nonzero = thread_local(initialexec) global float 1.000000, align 4 +@intern_float_zero = internal thread_local(initialexec) global float 0.000000, align 4 +@intern_float_nonzero = internal thread_local(initialexec) global float 1.000000, align 4 + +@global_double_zero = thread_local(initialexec) global double 0.000000, align 8 +@global_double_nonzero = thread_local(initialexec) global double 1.000000, align 8 +@intern_double_zero = internal thread_local(initialexec) global double 0.000000, align 8 +@intern_double_nonzero = internal thread_local(initialexec) global double 1.000000, align 8 + +define void @store_global_int_zero(i32 noundef signext %i) { +; LARGE64-LABEL: store_global_int_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C0@u(r2) +; LARGE64-NEXT: ld r4, L..C0@l(r4) +; LARGE64-NEXT: stwx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_int_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C0(r2) # target-flags(ppc-tprel) @global_int_zero +; SMALL64-NEXT: stwx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_int_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r4, r3 +; LARGE32-NEXT: addis r3, L..C0@u(r2) +; LARGE32-NEXT: lwz r5, L..C0@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stwx r4, r3, r5 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_int_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r5, L..C0(r2) # target-flags(ppc-tprel) @global_int_zero +; SMALL32-NEXT: mr r4, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; 
SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stwx r4, r3, r5 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_int_zero) + store i32 %i, ptr %addr, align 4 + ret void +} + +define void @store_global_int_nonzero(i32 noundef signext %i) { +; LARGE64-LABEL: store_global_int_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C1@u(r2) +; LARGE64-NEXT: ld r4, L..C1@l(r4) +; LARGE64-NEXT: stwx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_int_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tprel) @global_int_nonzero +; SMALL64-NEXT: stwx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_int_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r4, r3 +; LARGE32-NEXT: addis r3, L..C1@u(r2) +; LARGE32-NEXT: lwz r5, L..C1@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stwx r4, r3, r5 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_int_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r5, L..C1(r2) # target-flags(ppc-tprel) @global_int_nonzero +; SMALL32-NEXT: mr r4, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stwx r4, r3, r5 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_int_nonzero) + store i32 %i, ptr %addr, align 4 + ret void +} + +define void @store_intern_int_zero(i32 noundef signext %i) { +; LARGE64-LABEL: store_intern_int_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C2@u(r2) +; LARGE64-NEXT: ld r4, L..C2@l(r4) +; LARGE64-NEXT: stwx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_int_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tprel) @intern_int_zero +; SMALL64-NEXT: stwx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_int_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r4, r3 +; LARGE32-NEXT: addis r3, L..C2@u(r2) +; LARGE32-NEXT: lwz r5, L..C2@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stwx r4, r3, r5 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_int_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r5, L..C2(r2) # target-flags(ppc-tprel) @intern_int_zero +; SMALL32-NEXT: mr r4, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stwx r4, r3, r5 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_int_zero) + store i32 %i, ptr %addr, align 4 + ret void +} + +define void @store_intern_int_nonzero(i32 noundef signext %i) { +; LARGE64-LABEL: store_intern_int_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: 
addis r4, L..C3@u(r2) +; LARGE64-NEXT: ld r4, L..C3@l(r4) +; LARGE64-NEXT: stwx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_int_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C3(r2) # target-flags(ppc-tprel) @intern_int_nonzero +; SMALL64-NEXT: stwx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_int_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r4, r3 +; LARGE32-NEXT: addis r3, L..C3@u(r2) +; LARGE32-NEXT: lwz r5, L..C3@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stwx r4, r3, r5 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_int_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r5, L..C3(r2) # target-flags(ppc-tprel) @intern_int_nonzero +; SMALL32-NEXT: mr r4, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stwx r4, r3, r5 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_int_nonzero) + store i32 %i, ptr %addr, align 4 + ret void +} + +define signext i32 @load_global_int_zero() { +; LARGE64-LABEL: load_global_int_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C0@u(r2) +; LARGE64-NEXT: ld r3, L..C0@l(r3) +; LARGE64-NEXT: lwax r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_int_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tprel) @global_int_zero +; SMALL64-NEXT: lwax r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_int_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C0@u(r2) +; LARGE32-NEXT: lwz r4, L..C0@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lwzx r3, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_global_int_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C0(r2) # target-flags(ppc-tprel) @global_int_zero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lwzx r3, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_int_zero) + %val = load i32, ptr %addr, align 4 + ret i32 %val +} + +define signext i32 @load_global_int_nonzero() { +; LARGE64-LABEL: load_global_int_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C1@u(r2) +; LARGE64-NEXT: ld r3, L..C1@l(r3) +; LARGE64-NEXT: lwax r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_int_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C1(r2) # target-flags(ppc-tprel) @global_int_nonzero +; SMALL64-NEXT: lwax r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_int_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C1@u(r2) +; LARGE32-NEXT: lwz r4, 
L..C1@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lwzx r3, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_global_int_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C1(r2) # target-flags(ppc-tprel) @global_int_nonzero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lwzx r3, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_int_nonzero) + %val = load i32, ptr %addr, align 4 + ret i32 %val +} + +define signext i32 @load_intern_int_zero() { +; LARGE64-LABEL: load_intern_int_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C2@u(r2) +; LARGE64-NEXT: ld r3, L..C2@l(r3) +; LARGE64-NEXT: lwax r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_intern_int_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @intern_int_zero +; SMALL64-NEXT: lwax r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_intern_int_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C2@u(r2) +; LARGE32-NEXT: lwz r4, L..C2@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lwzx r3, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_intern_int_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C2(r2) # target-flags(ppc-tprel) @intern_int_zero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lwzx r3, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_int_zero) + %val = load i32, ptr %addr, align 4 + ret i32 %val +} + +define signext i32 @load_intern_int_nonzero() { +; LARGE64-LABEL: load_intern_int_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C3@u(r2) +; LARGE64-NEXT: ld r3, L..C3@l(r3) +; LARGE64-NEXT: lwax r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_intern_int_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C3(r2) # target-flags(ppc-tprel) @intern_int_nonzero +; SMALL64-NEXT: lwax r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_intern_int_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C3@u(r2) +; LARGE32-NEXT: lwz r4, L..C3@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lwzx r3, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_intern_int_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C3(r2) # target-flags(ppc-tprel) @intern_int_nonzero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lwzx r3, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; 
SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_int_nonzero) + %val = load i32, ptr %addr, align 4 + ret i32 %val +} + +define void @store_global_long_zero(i64 noundef %i) { +; LARGE64-LABEL: store_global_long_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C4@u(r2) +; LARGE64-NEXT: ld r4, L..C4@l(r4) +; LARGE64-NEXT: stdx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_long_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C4(r2) # target-flags(ppc-tprel) @global_long_zero +; SMALL64-NEXT: stdx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_long_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r5, r3 +; LARGE32-NEXT: addis r3, L..C4@u(r2) +; LARGE32-NEXT: lwz r6, L..C4@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r3, r3, r6 +; LARGE32-NEXT: stw r4, 4(r3) +; LARGE32-NEXT: stw r5, 0(r3) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_long_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r6, L..C4(r2) # target-flags(ppc-tprel) @global_long_zero +; SMALL32-NEXT: mr r5, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r3, r3, r6 +; SMALL32-NEXT: stw r4, 4(r3) +; SMALL32-NEXT: stw r5, 0(r3) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_long_zero) + store i64 %i, ptr %addr, align 8 + ret void +} + +define void @store_global_long_nonzero(i64 noundef %i) { +; LARGE64-LABEL: store_global_long_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C5@u(r2) +; LARGE64-NEXT: ld r4, L..C5@l(r4) +; LARGE64-NEXT: stdx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_long_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tprel) @global_long_nonzero +; SMALL64-NEXT: stdx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_long_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r5, r3 +; LARGE32-NEXT: addis r3, L..C5@u(r2) +; LARGE32-NEXT: lwz r6, L..C5@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r3, r3, r6 +; LARGE32-NEXT: stw r4, 4(r3) +; LARGE32-NEXT: stw r5, 0(r3) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_long_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r6, L..C5(r2) # target-flags(ppc-tprel) @global_long_nonzero +; SMALL32-NEXT: mr r5, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r3, r3, r6 +; SMALL32-NEXT: stw r4, 4(r3) +; SMALL32-NEXT: stw r5, 0(r3) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_long_nonzero) + store i64 %i, ptr %addr, align 8 + ret void +} + +define void @store_intern_long_zero(i64 noundef %i) { +; 
LARGE64-LABEL: store_intern_long_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C6@u(r2) +; LARGE64-NEXT: ld r4, L..C6@l(r4) +; LARGE64-NEXT: stdx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_long_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tprel) @intern_long_zero +; SMALL64-NEXT: stdx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_long_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r5, r3 +; LARGE32-NEXT: addis r3, L..C6@u(r2) +; LARGE32-NEXT: lwz r6, L..C6@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r3, r3, r6 +; LARGE32-NEXT: stw r4, 4(r3) +; LARGE32-NEXT: stw r5, 0(r3) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_long_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r6, L..C6(r2) # target-flags(ppc-tprel) @intern_long_zero +; SMALL32-NEXT: mr r5, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r3, r3, r6 +; SMALL32-NEXT: stw r4, 4(r3) +; SMALL32-NEXT: stw r5, 0(r3) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_long_zero) + store i64 %i, ptr %addr, align 8 + ret void +} + +define void @store_intern_long_nonzero(i64 noundef %i) { +; LARGE64-LABEL: store_intern_long_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r4, L..C7@u(r2) +; LARGE64-NEXT: ld r4, L..C7@l(r4) +; LARGE64-NEXT: stdx r3, r13, r4 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_long_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r4, L..C7(r2) # target-flags(ppc-tprel) @intern_long_nonzero +; SMALL64-NEXT: stdx r3, r13, r4 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_long_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: mr r5, r3 +; LARGE32-NEXT: addis r3, L..C7@u(r2) +; LARGE32-NEXT: lwz r6, L..C7@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r3, r3, r6 +; LARGE32-NEXT: stw r4, 4(r3) +; LARGE32-NEXT: stw r5, 0(r3) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_long_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r6, L..C7(r2) # target-flags(ppc-tprel) @intern_long_nonzero +; SMALL32-NEXT: mr r5, r3 +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r3, r3, r6 +; SMALL32-NEXT: stw r4, 4(r3) +; SMALL32-NEXT: stw r5, 0(r3) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_long_nonzero) + store i64 %i, ptr %addr, align 8 + ret void +} + +define i64 @load_global_long_zero() { +; LARGE64-LABEL: load_global_long_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C4@u(r2) +; LARGE64-NEXT: ld r3, L..C4@l(r3) +; LARGE64-NEXT: ldx r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_long_zero: +; SMALL64: 
# %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C4(r2) # target-flags(ppc-tprel) @global_long_zero +; SMALL64-NEXT: ldx r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_long_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C4@u(r2) +; LARGE32-NEXT: lwz r4, L..C4@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r4, r3, r4 +; LARGE32-NEXT: lwz r3, 0(r4) +; LARGE32-NEXT: lwz r4, 4(r4) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_global_long_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C4(r2) # target-flags(ppc-tprel) @global_long_zero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r4, r3, r4 +; SMALL32-NEXT: lwz r3, 0(r4) +; SMALL32-NEXT: lwz r4, 4(r4) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_long_zero) + %val = load i64, ptr %addr, align 8 + ret i64 %val +} + +define i64 @load_global_long_nonzero() { +; LARGE64-LABEL: load_global_long_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C5@u(r2) +; LARGE64-NEXT: ld r3, L..C5@l(r3) +; LARGE64-NEXT: ldx r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_long_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C5(r2) # target-flags(ppc-tprel) @global_long_nonzero +; SMALL64-NEXT: ldx r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_long_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C5@u(r2) +; LARGE32-NEXT: lwz r4, L..C5@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r4, r3, r4 +; LARGE32-NEXT: lwz r3, 0(r4) +; LARGE32-NEXT: lwz r4, 4(r4) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_global_long_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C5(r2) # target-flags(ppc-tprel) @global_long_nonzero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r4, r3, r4 +; SMALL32-NEXT: lwz r3, 0(r4) +; SMALL32-NEXT: lwz r4, 4(r4) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_long_nonzero) + %val = load i64, ptr %addr, align 8 + ret i64 %val +} + +define i64 @load_intern_long_zero() { +; LARGE64-LABEL: load_intern_long_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C6@u(r2) +; LARGE64-NEXT: ld r3, L..C6@l(r3) +; LARGE64-NEXT: ldx r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_intern_long_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C6(r2) # target-flags(ppc-tprel) @intern_long_zero +; SMALL64-NEXT: ldx r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_intern_long_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C6@u(r2) +; LARGE32-NEXT: lwz 
r4, L..C6@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r4, r3, r4 +; LARGE32-NEXT: lwz r3, 0(r4) +; LARGE32-NEXT: lwz r4, 4(r4) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_intern_long_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C6(r2) # target-flags(ppc-tprel) @intern_long_zero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r4, r3, r4 +; SMALL32-NEXT: lwz r3, 0(r4) +; SMALL32-NEXT: lwz r4, 4(r4) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_long_zero) + %val = load i64, ptr %addr, align 8 + ret i64 %val +} + +define i64 @load_intern_long_nonzero() { +; LARGE64-LABEL: load_intern_long_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C7@u(r2) +; LARGE64-NEXT: ld r3, L..C7@l(r3) +; LARGE64-NEXT: ldx r3, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_intern_long_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C7(r2) # target-flags(ppc-tprel) @intern_long_nonzero +; SMALL64-NEXT: ldx r3, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_intern_long_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C7@u(r2) +; LARGE32-NEXT: lwz r4, L..C7@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: add r4, r3, r4 +; LARGE32-NEXT: lwz r3, 0(r4) +; LARGE32-NEXT: lwz r4, 4(r4) +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_intern_long_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C7(r2) # target-flags(ppc-tprel) @intern_long_nonzero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: add r4, r3, r4 +; SMALL32-NEXT: lwz r3, 0(r4) +; SMALL32-NEXT: lwz r4, 4(r4) +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_long_nonzero) + %val = load i64, ptr %addr, align 8 + ret i64 %val +} + +define void @store_global_float_zero(float noundef %i) { +; LARGE64-LABEL: store_global_float_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C8@u(r2) +; LARGE64-NEXT: ld r3, L..C8@l(r3) +; LARGE64-NEXT: stfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_float_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C8(r2) # target-flags(ppc-tprel) @global_float_zero +; SMALL64-NEXT: stfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_float_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C8@u(r2) +; LARGE32-NEXT: lwz r4, L..C8@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_float_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; 
SMALL32-NEXT: lwz r4, L..C8(r2) # target-flags(ppc-tprel) @global_float_zero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_float_zero) + store float %i, ptr %addr, align 4 + ret void +} + +define void @store_global_float_nonzero(float noundef %i) { +; LARGE64-LABEL: store_global_float_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C9@u(r2) +; LARGE64-NEXT: ld r3, L..C9@l(r3) +; LARGE64-NEXT: stfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_float_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C9(r2) # target-flags(ppc-tprel) @global_float_nonzero +; SMALL64-NEXT: stfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_float_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C9@u(r2) +; LARGE32-NEXT: lwz r4, L..C9@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_float_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C9(r2) # target-flags(ppc-tprel) @global_float_nonzero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_float_nonzero) + store float %i, ptr %addr, align 4 + ret void +} + +define void @store_intern_float_zero(float noundef %i) { +; LARGE64-LABEL: store_intern_float_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C10@u(r2) +; LARGE64-NEXT: ld r3, L..C10@l(r3) +; LARGE64-NEXT: stfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_float_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C10(r2) # target-flags(ppc-tprel) @intern_float_zero +; SMALL64-NEXT: stfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_float_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C10@u(r2) +; LARGE32-NEXT: lwz r4, L..C10@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_float_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C10(r2) # target-flags(ppc-tprel) @intern_float_zero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_float_zero) + store float %i, ptr %addr, align 4 + ret void +} + +define void @store_intern_float_nonzero(float noundef %i) { +; LARGE64-LABEL: 
store_intern_float_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C11@u(r2) +; LARGE64-NEXT: ld r3, L..C11@l(r3) +; LARGE64-NEXT: stfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_float_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C11(r2) # target-flags(ppc-tprel) @intern_float_nonzero +; SMALL64-NEXT: stfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_float_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C11@u(r2) +; LARGE32-NEXT: lwz r4, L..C11@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_float_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C11(r2) # target-flags(ppc-tprel) @intern_float_nonzero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_float_nonzero) + store float %i, ptr %addr, align 4 + ret void +} + +define float @load_global_float_zero() { +; LARGE64-LABEL: load_global_float_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C8@u(r2) +; LARGE64-NEXT: ld r3, L..C8@l(r3) +; LARGE64-NEXT: lfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_float_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C8(r2) # target-flags(ppc-tprel) @global_float_zero +; SMALL64-NEXT: lfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_float_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C8@u(r2) +; LARGE32-NEXT: lwz r4, L..C8@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_global_float_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C8(r2) # target-flags(ppc-tprel) @global_float_zero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_float_zero) + %val = load float, ptr %addr, align 4 + ret float %val +} + +define float @load_global_float_nonzero() { +; LARGE64-LABEL: load_global_float_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C9@u(r2) +; LARGE64-NEXT: ld r3, L..C9@l(r3) +; LARGE64-NEXT: lfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_float_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C9(r2) # target-flags(ppc-tprel) @global_float_nonzero +; SMALL64-NEXT: lfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_float_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; 
LARGE32-NEXT: addis r3, L..C9@u(r2) +; LARGE32-NEXT: lwz r4, L..C9@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_global_float_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C9(r2) # target-flags(ppc-tprel) @global_float_nonzero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_float_nonzero) + %val = load float, ptr %addr, align 4 + ret float %val +} + +define float @load_intern_float_zero() { +; LARGE64-LABEL: load_intern_float_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C10@u(r2) +; LARGE64-NEXT: ld r3, L..C10@l(r3) +; LARGE64-NEXT: lfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_intern_float_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C10(r2) # target-flags(ppc-tprel) @intern_float_zero +; SMALL64-NEXT: lfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_intern_float_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C10@u(r2) +; LARGE32-NEXT: lwz r4, L..C10@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_intern_float_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C10(r2) # target-flags(ppc-tprel) @intern_float_zero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lfsx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_float_zero) + %val = load float, ptr %addr, align 4 + ret float %val +} + +define float @load_intern_float_nonzero() { +; LARGE64-LABEL: load_intern_float_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C11@u(r2) +; LARGE64-NEXT: ld r3, L..C11@l(r3) +; LARGE64-NEXT: lfsx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_intern_float_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C11(r2) # target-flags(ppc-tprel) @intern_float_nonzero +; SMALL64-NEXT: lfsx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_intern_float_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C11@u(r2) +; LARGE32-NEXT: lwz r4, L..C11@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: lfsx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: load_intern_float_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C11(r2) # target-flags(ppc-tprel) @intern_float_nonzero +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: lfsx 
f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_float_nonzero) + %val = load float, ptr %addr, align 4 + ret float %val +} + +define void @store_global_double_zero(double noundef %i) { +; LARGE64-LABEL: store_global_double_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C12@u(r2) +; LARGE64-NEXT: ld r3, L..C12@l(r3) +; LARGE64-NEXT: stfdx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_double_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C12(r2) # target-flags(ppc-tprel) @global_double_zero +; SMALL64-NEXT: stfdx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_double_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C12@u(r2) +; LARGE32-NEXT: lwz r4, L..C12@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfdx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_double_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C12(r2) # target-flags(ppc-tprel) @global_double_zero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfdx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_double_zero) + store double %i, ptr %addr, align 8 + ret void +} + +define void @store_global_double_nonzero(double noundef %i) { +; LARGE64-LABEL: store_global_double_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C13@u(r2) +; LARGE64-NEXT: ld r3, L..C13@l(r3) +; LARGE64-NEXT: stfdx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_global_double_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C13(r2) # target-flags(ppc-tprel) @global_double_nonzero +; SMALL64-NEXT: stfdx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_global_double_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C13@u(r2) +; LARGE32-NEXT: lwz r4, L..C13@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfdx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_global_double_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C13(r2) # target-flags(ppc-tprel) @global_double_nonzero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfdx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_double_nonzero) + store double %i, ptr %addr, align 8 + ret void +} + +define void @store_intern_double_zero(double noundef %i) { +; LARGE64-LABEL: store_intern_double_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C14@u(r2) +; LARGE64-NEXT: ld r3, L..C14@l(r3) +; LARGE64-NEXT: stfdx f1, 
r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_double_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C14(r2) # target-flags(ppc-tprel) @intern_double_zero +; SMALL64-NEXT: stfdx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_double_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C14@u(r2) +; LARGE32-NEXT: lwz r4, L..C14@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfdx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_double_zero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C14(r2) # target-flags(ppc-tprel) @intern_double_zero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfdx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_double_zero) + store double %i, ptr %addr, align 8 + ret void +} + +define void @store_intern_double_nonzero(double noundef %i) { +; LARGE64-LABEL: store_intern_double_nonzero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C15@u(r2) +; LARGE64-NEXT: ld r3, L..C15@l(r3) +; LARGE64-NEXT: stfdx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: store_intern_double_nonzero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C15(r2) # target-flags(ppc-tprel) @intern_double_nonzero +; SMALL64-NEXT: stfdx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: store_intern_double_nonzero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C15@u(r2) +; LARGE32-NEXT: lwz r4, L..C15@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; LARGE32-NEXT: stfdx f1, r3, r4 +; LARGE32-NEXT: addi r1, r1, 32 +; LARGE32-NEXT: lwz r0, 8(r1) +; LARGE32-NEXT: mtlr r0 +; LARGE32-NEXT: blr +; +; SMALL32-LABEL: store_intern_double_nonzero: +; SMALL32: # %bb.0: # %entry +; SMALL32-NEXT: mflr r0 +; SMALL32-NEXT: stwu r1, -32(r1) +; SMALL32-NEXT: lwz r4, L..C15(r2) # target-flags(ppc-tprel) @intern_double_nonzero +; SMALL32-NEXT: bla .__get_tpointer[PR] +; SMALL32-NEXT: stw r0, 40(r1) +; SMALL32-NEXT: stfdx f1, r3, r4 +; SMALL32-NEXT: addi r1, r1, 32 +; SMALL32-NEXT: lwz r0, 8(r1) +; SMALL32-NEXT: mtlr r0 +; SMALL32-NEXT: blr +entry: + %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_double_nonzero) + store double %i, ptr %addr, align 8 + ret void +} + +define double @load_global_double_zero() { +; LARGE64-LABEL: load_global_double_zero: +; LARGE64: # %bb.0: # %entry +; LARGE64-NEXT: addis r3, L..C12@u(r2) +; LARGE64-NEXT: ld r3, L..C12@l(r3) +; LARGE64-NEXT: lfdx f1, r13, r3 +; LARGE64-NEXT: blr +; +; SMALL64-LABEL: load_global_double_zero: +; SMALL64: # %bb.0: # %entry +; SMALL64-NEXT: ld r3, L..C12(r2) # target-flags(ppc-tprel) @global_double_zero +; SMALL64-NEXT: lfdx f1, r13, r3 +; SMALL64-NEXT: blr +; +; LARGE32-LABEL: load_global_double_zero: +; LARGE32: # %bb.0: # %entry +; LARGE32-NEXT: mflr r0 +; LARGE32-NEXT: stwu r1, -32(r1) +; LARGE32-NEXT: stw r0, 40(r1) +; LARGE32-NEXT: addis r3, L..C12@u(r2) +; LARGE32-NEXT: lwz r4, L..C12@l(r3) +; LARGE32-NEXT: bla .__get_tpointer[PR] +; 
LARGE32-NEXT: lfdx f1, r3, r4
+; LARGE32-NEXT: addi r1, r1, 32
+; LARGE32-NEXT: lwz r0, 8(r1)
+; LARGE32-NEXT: mtlr r0
+; LARGE32-NEXT: blr
+;
+; SMALL32-LABEL: load_global_double_zero:
+; SMALL32: # %bb.0: # %entry
+; SMALL32-NEXT: mflr r0
+; SMALL32-NEXT: stwu r1, -32(r1)
+; SMALL32-NEXT: lwz r4, L..C12(r2) # target-flags(ppc-tprel) @global_double_zero
+; SMALL32-NEXT: stw r0, 40(r1)
+; SMALL32-NEXT: bla .__get_tpointer[PR]
+; SMALL32-NEXT: lfdx f1, r3, r4
+; SMALL32-NEXT: addi r1, r1, 32
+; SMALL32-NEXT: lwz r0, 8(r1)
+; SMALL32-NEXT: mtlr r0
+; SMALL32-NEXT: blr
+entry:
+  %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_double_zero)
+  %val = load double, ptr %addr, align 8
+  ret double %val
+}
+
+define double @load_global_double_nonzero() {
+; LARGE64-LABEL: load_global_double_nonzero:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: addis r3, L..C13@u(r2)
+; LARGE64-NEXT: ld r3, L..C13@l(r3)
+; LARGE64-NEXT: lfdx f1, r13, r3
+; LARGE64-NEXT: blr
+;
+; SMALL64-LABEL: load_global_double_nonzero:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: ld r3, L..C13(r2) # target-flags(ppc-tprel) @global_double_nonzero
+; SMALL64-NEXT: lfdx f1, r13, r3
+; SMALL64-NEXT: blr
+;
+; LARGE32-LABEL: load_global_double_nonzero:
+; LARGE32: # %bb.0: # %entry
+; LARGE32-NEXT: mflr r0
+; LARGE32-NEXT: stwu r1, -32(r1)
+; LARGE32-NEXT: stw r0, 40(r1)
+; LARGE32-NEXT: addis r3, L..C13@u(r2)
+; LARGE32-NEXT: lwz r4, L..C13@l(r3)
+; LARGE32-NEXT: bla .__get_tpointer[PR]
+; LARGE32-NEXT: lfdx f1, r3, r4
+; LARGE32-NEXT: addi r1, r1, 32
+; LARGE32-NEXT: lwz r0, 8(r1)
+; LARGE32-NEXT: mtlr r0
+; LARGE32-NEXT: blr
+;
+; SMALL32-LABEL: load_global_double_nonzero:
+; SMALL32: # %bb.0: # %entry
+; SMALL32-NEXT: mflr r0
+; SMALL32-NEXT: stwu r1, -32(r1)
+; SMALL32-NEXT: lwz r4, L..C13(r2) # target-flags(ppc-tprel) @global_double_nonzero
+; SMALL32-NEXT: stw r0, 40(r1)
+; SMALL32-NEXT: bla .__get_tpointer[PR]
+; SMALL32-NEXT: lfdx f1, r3, r4
+; SMALL32-NEXT: addi r1, r1, 32
+; SMALL32-NEXT: lwz r0, 8(r1)
+; SMALL32-NEXT: mtlr r0
+; SMALL32-NEXT: blr
+entry:
+  %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @global_double_nonzero)
+  %val = load double, ptr %addr, align 8
+  ret double %val
+}
+
+define double @load_intern_double_zero() {
+; LARGE64-LABEL: load_intern_double_zero:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: addis r3, L..C14@u(r2)
+; LARGE64-NEXT: ld r3, L..C14@l(r3)
+; LARGE64-NEXT: lfdx f1, r13, r3
+; LARGE64-NEXT: blr
+;
+; SMALL64-LABEL: load_intern_double_zero:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: ld r3, L..C14(r2) # target-flags(ppc-tprel) @intern_double_zero
+; SMALL64-NEXT: lfdx f1, r13, r3
+; SMALL64-NEXT: blr
+;
+; LARGE32-LABEL: load_intern_double_zero:
+; LARGE32: # %bb.0: # %entry
+; LARGE32-NEXT: mflr r0
+; LARGE32-NEXT: stwu r1, -32(r1)
+; LARGE32-NEXT: stw r0, 40(r1)
+; LARGE32-NEXT: addis r3, L..C14@u(r2)
+; LARGE32-NEXT: lwz r4, L..C14@l(r3)
+; LARGE32-NEXT: bla .__get_tpointer[PR]
+; LARGE32-NEXT: lfdx f1, r3, r4
+; LARGE32-NEXT: addi r1, r1, 32
+; LARGE32-NEXT: lwz r0, 8(r1)
+; LARGE32-NEXT: mtlr r0
+; LARGE32-NEXT: blr
+;
+; SMALL32-LABEL: load_intern_double_zero:
+; SMALL32: # %bb.0: # %entry
+; SMALL32-NEXT: mflr r0
+; SMALL32-NEXT: stwu r1, -32(r1)
+; SMALL32-NEXT: lwz r4, L..C14(r2) # target-flags(ppc-tprel) @intern_double_zero
+; SMALL32-NEXT: stw r0, 40(r1)
+; SMALL32-NEXT: bla .__get_tpointer[PR]
+; SMALL32-NEXT: lfdx f1, r3, r4
+; SMALL32-NEXT: addi r1, r1, 32
+; SMALL32-NEXT: lwz r0, 8(r1)
+; SMALL32-NEXT: mtlr r0
+; SMALL32-NEXT: blr
+entry:
+  %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_double_zero)
+  %val = load double, ptr %addr, align 8
+  ret double %val
+}
+
+define double @load_intern_double_nonzero() {
+; LARGE64-LABEL: load_intern_double_nonzero:
+; LARGE64: # %bb.0: # %entry
+; LARGE64-NEXT: addis r3, L..C15@u(r2)
+; LARGE64-NEXT: ld r3, L..C15@l(r3)
+; LARGE64-NEXT: lfdx f1, r13, r3
+; LARGE64-NEXT: blr
+;
+; SMALL64-LABEL: load_intern_double_nonzero:
+; SMALL64: # %bb.0: # %entry
+; SMALL64-NEXT: ld r3, L..C15(r2) # target-flags(ppc-tprel) @intern_double_nonzero
+; SMALL64-NEXT: lfdx f1, r13, r3
+; SMALL64-NEXT: blr
+;
+; LARGE32-LABEL: load_intern_double_nonzero:
+; LARGE32: # %bb.0: # %entry
+; LARGE32-NEXT: mflr r0
+; LARGE32-NEXT: stwu r1, -32(r1)
+; LARGE32-NEXT: stw r0, 40(r1)
+; LARGE32-NEXT: addis r3, L..C15@u(r2)
+; LARGE32-NEXT: lwz r4, L..C15@l(r3)
+; LARGE32-NEXT: bla .__get_tpointer[PR]
+; LARGE32-NEXT: lfdx f1, r3, r4
+; LARGE32-NEXT: addi r1, r1, 32
+; LARGE32-NEXT: lwz r0, 8(r1)
+; LARGE32-NEXT: mtlr r0
+; LARGE32-NEXT: blr
+;
+; SMALL32-LABEL: load_intern_double_nonzero:
+; SMALL32: # %bb.0: # %entry
+; SMALL32-NEXT: mflr r0
+; SMALL32-NEXT: stwu r1, -32(r1)
+; SMALL32-NEXT: lwz r4, L..C15(r2) # target-flags(ppc-tprel) @intern_double_nonzero
+; SMALL32-NEXT: stw r0, 40(r1)
+; SMALL32-NEXT: bla .__get_tpointer[PR]
+; SMALL32-NEXT: lfdx f1, r3, r4
+; SMALL32-NEXT: addi r1, r1, 32
+; SMALL32-NEXT: lwz r0, 8(r1)
+; SMALL32-NEXT: mtlr r0
+; SMALL32-NEXT: blr
+entry:
+  %addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @intern_double_nonzero)
+  %val = load double, ptr %addr, align 8
+  ret double %val
+}
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; LARGE64-COUNT-16: .tc [[NAME:[a-z_]+]][TE],[[NAME]][{{T|U}}L]@ie
+; SMALL64-COUNT-16: .tc [[NAME:[a-z_]+]][TC],[[NAME]][{{T|U}}L]@ie
+; LARGE32-COUNT-16: .tc [[NAME:[a-z_]+]][TE],[[NAME]][{{T|U}}L]@ie
+; SMALL32-COUNT-16: .tc [[NAME:[a-z_]+]][TC],[[NAME]][{{T|U}}L]@ie
diff --git a/llvm/test/CodeGen/PowerPC/aix-tls-ie-xcoff-reloc.ll b/llvm/test/CodeGen/PowerPC/aix-tls-ie-xcoff-reloc.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-tls-ie-xcoff-reloc.ll
@@ -0,0 +1,387 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff \
+; RUN:   -xcoff-traceback-table=false -data-sections=false -filetype=obj -o %t.o < %s
+; RUN: llvm-readobj --relocs --expand-relocs %t.o | FileCheck --check-prefix=REL64 %s
+; RUN: llvm-readobj --syms %t.o | FileCheck --check-prefix=SYM64 %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck --check-prefix=DIS64 %s
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -code-model=small \
+; RUN:   -xcoff-traceback-table=false -data-sections=false -filetype=obj -o %t.o < %s
+; RUN: llvm-readobj --relocs --expand-relocs %t.o | FileCheck --check-prefix=REL64 %s
+; RUN: llvm-readobj --syms %t.o | FileCheck --check-prefix=SYM64 %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck --check-prefix=DIS64 %s
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc-ibm-aix-xcoff \
+; RUN:   -xcoff-traceback-table=false -data-sections=false -filetype=obj -o %t.o < %s
+; RUN: llvm-readobj --relocs --expand-relocs %t.o | FileCheck --check-prefix=REL32 %s
+; RUN: llvm-readobj --syms %t.o | FileCheck --check-prefix=SYM32 %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck --check-prefix=DIS32 %s
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc-ibm-aix-xcoff -code-model=small \
+; RUN:   -xcoff-traceback-table=false -data-sections=false -filetype=obj -o %t.o < %s
+; RUN: llvm-readobj --relocs --expand-relocs %t.o | FileCheck --check-prefix=REL32 %s
+; RUN: llvm-readobj --syms %t.o | FileCheck --check-prefix=SYM32 %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck --check-prefix=DIS32 %s
+
+@global_int_nonzero = thread_local(initialexec) global i32 1, align 4
+@intern_int_zero = internal thread_local(initialexec) global i32 0, align 4
+
+define void @store_intern_int_zero(i32 noundef signext %i) {
+entry:
+  %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_int_zero)
+  store i32 %i, ptr %addr, align 4
+  ret void
+}
+
+define signext i32 @load_global_int_nonzero() {
+entry:
+  %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @global_int_nonzero)
+  %val = load i32, ptr %addr, align 4
+  ret i32 %val
+}
+
+define signext i32 @load_intern_int_zero() {
+entry:
+  %addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @intern_int_zero)
+  %val = load i32, ptr %addr, align 4
+  ret i32 %val
+}
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; REL64: File: {{.*}}aix-tls-ie-xcoff-reloc.ll.tmp.o
+; REL64-NEXT: Format: aix5coff64-rs6000
+; REL64-NEXT: Arch: powerpc64
+; REL64-NEXT: AddressSize: 64bit
+; REL64-NEXT: Relocations [
+; REL64: Virtual Address: 0x2
+; REL64-NEXT: Symbol: intern_int_zero (17)
+; REL64-NEXT: IsSigned: No
+; REL64-NEXT: FixupBitValue: 0
+; REL64-NEXT: Length: 16
+; REL64-NEXT: Type: R_TOC (0x3)
+; REL64-NEXT: }
+; REL64: Virtual Address: 0x12
+; REL64-NEXT: Symbol: global_int_nonzero (19)
+; REL64-NEXT: IsSigned: No
+; REL64-NEXT: FixupBitValue: 0
+; REL64-NEXT: Length: 16
+; REL64-NEXT: Type: R_TOC (0x3)
+; REL64-NEXT: }
+; REL64: Virtual Address: 0x22
+; REL64-NEXT: Symbol: intern_int_zero (17)
+; REL64-NEXT: IsSigned: No
+; REL64-NEXT: FixupBitValue: 0
+; REL64-NEXT: Length: 16
+; REL64-NEXT: Type: R_TOC (0x3)
+; REL64-NEXT: }
+; REL64: Virtual Address: 0x78
+; REL64-NEXT: Symbol: intern_int_zero (25)
+; REL64-NEXT: IsSigned: No
+; REL64-NEXT: FixupBitValue: 0
+; REL64-NEXT: Length: 64
+; REL64-NEXT: Type: R_TLS_IE (0x21)
+; REL64-NEXT: }
+; REL64: Virtual Address: 0x80
+; REL64-NEXT: Symbol: global_int_nonzero (23)
+; REL64-NEXT: IsSigned: No
+; REL64-NEXT: FixupBitValue: 0
+; REL64-NEXT: Length: 64
+; REL64-NEXT: Type: R_TLS_IE (0x21)
+; REL64-NEXT: }
+
+; SYM64: File: {{.*}}aix-tls-ie-xcoff-reloc.ll.tmp.o
+; SYM64-NEXT: Format: aix5coff64-rs6000
+; SYM64-NEXT: Arch: powerpc64
+; SYM64-NEXT: AddressSize: 64bit
+; SYM64-NEXT: Symbols [
+; SYM64: Index: 17
+; SYM64-NEXT: Name: intern_int_zero
+; SYM64-NEXT: Value (RelocatableAddress): 0x78
+; SYM64-NEXT: Section: .data
+; SYM64-NEXT: Type: 0x0
+; SYM64-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM64-NEXT: NumberOfAuxEntries: 1
+; SYM64-NEXT: CSECT Auxiliary Entry {
+; SYM64-NEXT: Index: 18
+; SYM64-NEXT: SectionLen: 8
+; SYM64-NEXT: ParameterHashIndex: 0x0
+; SYM64-NEXT: TypeChkSectNum: 0x0
+; SYM64-NEXT: SymbolAlignmentLog2: 3
+; SYM64-NEXT: SymbolType: XTY_SD (0x1)
+; SYM64-NEXT: StorageMappingClass: XMC_TC (0x3)
+; SYM64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM64-NEXT: }
+; SYM64-NEXT: }
+; SYM64: Index: 19
+; SYM64-NEXT: Name: global_int_nonzero
+; SYM64-NEXT: Value (RelocatableAddress): 0x80
+; SYM64-NEXT: Section: .data
+; SYM64-NEXT: Type: 0x0
+; SYM64-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM64-NEXT: NumberOfAuxEntries: 1
+; SYM64-NEXT: CSECT Auxiliary Entry {
+; SYM64-NEXT: Index: 20
+; SYM64-NEXT: SectionLen: 8
+; SYM64-NEXT: ParameterHashIndex: 0x0
+; SYM64-NEXT: TypeChkSectNum: 0x0
+; SYM64-NEXT: SymbolAlignmentLog2: 3
+; SYM64-NEXT: SymbolType: XTY_SD (0x1)
+; SYM64-NEXT: StorageMappingClass: XMC_TC (0x3)
+; SYM64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM64-NEXT: }
+; SYM64-NEXT: }
+; SYM64: Index: 23
+; SYM64-NEXT: Name: global_int_nonzero
+; SYM64-NEXT: Value (RelocatableAddress): 0x0
+; SYM64-NEXT: Section: .tdata
+; SYM64-NEXT: Type: 0x0
+; SYM64-NEXT: StorageClass: C_EXT (0x2)
+; SYM64-NEXT: NumberOfAuxEntries: 1
+; SYM64-NEXT: CSECT Auxiliary Entry {
+; SYM64-NEXT: Index: 24
+; SYM64-NEXT: ContainingCsectSymbolIndex: 21
+; SYM64-NEXT: ParameterHashIndex: 0x0
+; SYM64-NEXT: TypeChkSectNum: 0x0
+; SYM64-NEXT: SymbolAlignmentLog2: 0
+; SYM64-NEXT: SymbolType: XTY_LD (0x2)
+; SYM64-NEXT: StorageMappingClass: XMC_TL (0x14)
+; SYM64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM64-NEXT: }
+; SYM64-NEXT: }
+; SYM64: Index: 25
+; SYM64-NEXT: Name: intern_int_zero
+; SYM64-NEXT: Value (RelocatableAddress): 0x4
+; SYM64-NEXT: Section: .tbss
+; SYM64-NEXT: Type: 0x0
+; SYM64-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM64-NEXT: NumberOfAuxEntries: 1
+; SYM64-NEXT: CSECT Auxiliary Entry {
+; SYM64-NEXT: Index: 26
+; SYM64-NEXT: SectionLen: 4
+; SYM64-NEXT: ParameterHashIndex: 0x0
+; SYM64-NEXT: TypeChkSectNum: 0x0
+; SYM64-NEXT: SymbolAlignmentLog2: 2
+; SYM64-NEXT: SymbolType: XTY_CM (0x3)
+; SYM64-NEXT: StorageMappingClass: XMC_UL (0x15)
+; SYM64-NEXT: Auxiliary Type: AUX_CSECT (0xFB)
+; SYM64-NEXT: }
+; SYM64-NEXT: }
+
+; DIS64: {{.*}}aix-tls-ie-xcoff-reloc.ll.tmp.o: file format aix5coff64-rs6000
+; DIS64: Disassembly of section .text:
+; DIS64: (idx: 3) .store_intern_int_zero:
+; DIS64-NEXT: ld 4, 0(2)
+; DIS64-NEXT: (idx: 17) intern_int_zero[TC]
+; DIS64-NEXT: stwx 3, 13, 4
+; DIS64-NEXT: blr
+; DIS64: (idx: 5) .load_global_int_nonzero:
+; DIS64-NEXT: ld 3, 8(2)
+; DIS64-NEXT: (idx: 19) global_int_nonzero[TC]
+; DIS64-NEXT: lwax 3, 13, 3
+; DIS64-NEXT: blr
+; DIS64: (idx: 7) .load_intern_int_zero:
+; DIS64-NEXT: ld 3, 0(2)
+; DIS64-NEXT: (idx: 17) intern_int_zero[TC]
+; DIS64-NEXT: lwax 3, 13, 3
+; DIS64-NEXT: blr
+
+; DIS64: Disassembly of section .data:
+; DIS64: (idx: 9) store_intern_int_zero[DS]:
+; DIS64: R_POS (idx: 3) .store_intern_int_zero
+; DIS64: R_POS (idx: 15) TOC[TC0]
+; DIS64: (idx: 11) load_global_int_nonzero[DS]:
+; DIS64: R_POS (idx: 5) .load_global_int_nonzero
+; DIS64: R_POS (idx: 15) TOC[TC0]
+; DIS64: (idx: 13) load_intern_int_zero[DS]:
+; DIS64: R_POS (idx: 7) .load_intern_int_zero
+; DIS64: R_POS (idx: 15) TOC[TC0]
+; DIS64: (idx: 17) intern_int_zero[TC]:
+; DIS64: R_TLS_IE (idx: 25) intern_int_zero[UL]
+; DIS64: (idx: 19) global_int_nonzero[TC]:
+; DIS64: R_TLS_IE (idx: 23) global_int_nonzero
+
+; DIS64: Disassembly of section .tdata:
+; DIS64: (idx: 23) global_int_nonzero:
+
+; DIS64: Disassembly of section .tbss:
+; DIS64: (idx: 25) intern_int_zero[UL]:
+
+; REL32: File: {{.*}}aix-tls-ie-xcoff-reloc.ll.tmp.o
+; REL32-NEXT: Format: aixcoff-rs6000
+; REL32-NEXT: Arch: powerpc
+; REL32-NEXT: AddressSize: 32bit
+; REL32-NEXT: Relocations [
+; REL32: Virtual Address: 0xA
+; REL32-NEXT: Symbol: intern_int_zero (19)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 16
+; REL32-NEXT: Type: R_TOC (0x3)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0x10
+; REL32-NEXT: Symbol: .__get_tpointer (1)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 26
+; REL32-NEXT: Type: R_RBA (0x18)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0x3A
+; REL32-NEXT: Symbol: global_int_nonzero (21)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 16
+; REL32-NEXT: Type: R_TOC (0x3)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0x40
+; REL32-NEXT: Symbol: .__get_tpointer (1)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 26
+; REL32-NEXT: Type: R_RBA (0x18)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0x6A
+; REL32-NEXT: Symbol: intern_int_zero (19)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 16
+; REL32-NEXT: Type: R_TOC (0x3)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0x70
+; REL32-NEXT: Symbol: .__get_tpointer (1)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 26
+; REL32-NEXT: Type: R_RBA (0x18)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0xAC
+; REL32-NEXT: Symbol: intern_int_zero (27)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 32
+; REL32-NEXT: Type: R_TLS_IE (0x21)
+; REL32-NEXT: }
+; REL32: Virtual Address: 0xB0
+; REL32-NEXT: Symbol: global_int_nonzero (25)
+; REL32-NEXT: IsSigned: No
+; REL32-NEXT: FixupBitValue: 0
+; REL32-NEXT: Length: 32
+; REL32-NEXT: Type: R_TLS_IE (0x21)
+; REL32-NEXT: }
+
+; SYM32: File: {{.*}}aix-tls-ie-xcoff-reloc.ll.tmp.o
+; SYM32-NEXT: Format: aixcoff-rs6000
+; SYM32-NEXT: Arch: powerpc
+; SYM32-NEXT: AddressSize: 32bit
+; SYM32-NEXT: Symbols [
+; SYM32: Index: 19
+; SYM32-NEXT: Name: intern_int_zero
+; SYM32-NEXT: Value (RelocatableAddress): 0xAC
+; SYM32-NEXT: Section: .data
+; SYM32-NEXT: Type: 0x0
+; SYM32-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM32-NEXT: NumberOfAuxEntries: 1
+; SYM32-NEXT: CSECT Auxiliary Entry {
+; SYM32-NEXT: Index: 20
+; SYM32-NEXT: SectionLen: 4
+; SYM32-NEXT: ParameterHashIndex: 0x0
+; SYM32-NEXT: TypeChkSectNum: 0x0
+; SYM32-NEXT: SymbolAlignmentLog2: 2
+; SYM32-NEXT: SymbolType: XTY_SD (0x1)
+; SYM32-NEXT: StorageMappingClass: XMC_TC (0x3)
+; SYM32-NEXT: StabInfoIndex: 0x0
+; SYM32-NEXT: StabSectNum: 0x0
+; SYM32-NEXT: }
+; SYM32-NEXT: }
+; SYM32: Index: 21
+; SYM32-NEXT: Name: global_int_nonzero
+; SYM32-NEXT: Value (RelocatableAddress): 0xB0
+; SYM32-NEXT: Section: .data
+; SYM32-NEXT: Type: 0x0
+; SYM32-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM32-NEXT: NumberOfAuxEntries: 1
+; SYM32-NEXT: CSECT Auxiliary Entry {
+; SYM32-NEXT: Index: 22
+; SYM32-NEXT: SectionLen: 4
+; SYM32-NEXT: ParameterHashIndex: 0x0
+; SYM32-NEXT: TypeChkSectNum: 0x0
+; SYM32-NEXT: SymbolAlignmentLog2: 2
+; SYM32-NEXT: SymbolType: XTY_SD (0x1)
+; SYM32-NEXT: StorageMappingClass: XMC_TC (0x3)
+; SYM32-NEXT: StabInfoIndex: 0x0
+; SYM32-NEXT: StabSectNum: 0x0
+; SYM32-NEXT: }
+; SYM32-NEXT: }
+; SYM32: Index: 25
+; SYM32-NEXT: Name: global_int_nonzero
+; SYM32-NEXT: Value (RelocatableAddress): 0x0
+; SYM32-NEXT: Section: .tdata
+; SYM32-NEXT: Type: 0x0
+; SYM32-NEXT: StorageClass: C_EXT (0x2)
+; SYM32-NEXT: NumberOfAuxEntries: 1
+; SYM32-NEXT: CSECT Auxiliary Entry {
+; SYM32-NEXT: Index: 26
+; SYM32-NEXT: ContainingCsectSymbolIndex: 23
+; SYM32-NEXT: ParameterHashIndex: 0x0
+; SYM32-NEXT: TypeChkSectNum: 0x0
+; SYM32-NEXT: SymbolAlignmentLog2: 0
+; SYM32-NEXT: SymbolType: XTY_LD (0x2)
+; SYM32-NEXT: StorageMappingClass: XMC_TL (0x14)
+; SYM32-NEXT: StabInfoIndex: 0x0
+; SYM32-NEXT: StabSectNum: 0x0
+; SYM32-NEXT: }
+; SYM32-NEXT: }
+; SYM32: Index: 27
+; SYM32-NEXT: Name: intern_int_zero
+; SYM32-NEXT: Value (RelocatableAddress): 0x4
+; SYM32-NEXT: Section: .tbss
+; SYM32-NEXT: Type: 0x0
+; SYM32-NEXT: StorageClass: C_HIDEXT (0x6B)
+; SYM32-NEXT: NumberOfAuxEntries: 1
+; SYM32-NEXT: CSECT Auxiliary Entry {
+; SYM32-NEXT: Index: 28
+; SYM32-NEXT: SectionLen: 4
+; SYM32-NEXT: ParameterHashIndex: 0x0
+; SYM32-NEXT: TypeChkSectNum: 0x0
+; SYM32-NEXT: SymbolAlignmentLog2: 2
+; SYM32-NEXT: SymbolType: XTY_CM (0x3)
+; SYM32-NEXT: StorageMappingClass: XMC_UL (0x15)
+; SYM32-NEXT: StabInfoIndex: 0x0
+; SYM32-NEXT: StabSectNum: 0x0
+; SYM32-NEXT: }
+; SYM32-NEXT: }
+
+; DIS32: {{.*}}aix-tls-ie-xcoff-reloc.ll.tmp.o: file format aixcoff-rs6000
+; DIS32: Disassembly of section .text:
+; DIS32: (idx: 5) .store_intern_int_zero:
+; DIS32: R_TOC (idx: 19) intern_int_zero[TC]
+; DIS32: R_RBA (idx: 1) .__get_tpointer[PR]
+; DIS32: blr
+; DIS32: (idx: 7) .load_global_int_nonzero:
+; DIS32: R_TOC (idx: 21) global_int_nonzero[TC]
+; DIS32: R_RBA (idx: 1) .__get_tpointer[PR]
+; DIS32: blr
+; DIS32: (idx: 9) .load_intern_int_zero:
+; DIS32: R_TOC (idx: 19) intern_int_zero[TC]
+; DIS32: R_RBA (idx: 1) .__get_tpointer[PR]
+; DIS32: blr
+
+; DIS32: Disassembly of section .data:
+; DIS32: (idx: 11) store_intern_int_zero[DS]:
+; DIS32: R_POS (idx: 5) .store_intern_int_zero
+; DIS32: R_POS (idx: 17) TOC[TC0]
+; DIS32: (idx: 13) load_global_int_nonzero[DS]:
+; DIS32: R_POS (idx: 7) .load_global_int_nonzero
+; DIS32: R_POS (idx: 17) TOC[TC0]
+; DIS32: (idx: 15) load_intern_int_zero[DS]:
+; DIS32: R_POS (idx: 9) .load_intern_int_zero
+; DIS32: R_POS (idx: 17) TOC[TC0]
+; DIS32: (idx: 19) intern_int_zero[TC]:
+; DIS32: R_TLS_IE (idx: 27) intern_int_zero[UL]
+; DIS32: (idx: 21) global_int_nonzero[TC]:
+; DIS32: R_TLS_IE (idx: 25) global_int_nonzero
+
+; DIS32: Disassembly of section .tdata:
+; DIS32: (idx: 25) global_int_nonzero:
+
+; DIS32: Disassembly of section .tbss:
+; DIS32: (idx: 27) intern_int_zero[UL]: