Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2539,12 +2539,19 @@
 }
 
 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
+  // This is a materialize PC Relative node. Always select this as PC Relative.
+  if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR) {
+    Base = N;
+    return true;
+  }
+
   ConstantPoolSDNode *ConstPoolNode = dyn_cast<ConstantPoolSDNode>(N.getNode());
-  bool HasFlag = ConstPoolNode &&
-                 ConstPoolNode->getTargetFlags() == PPCII::MO_PCREL_FLAG;
-  bool HasNode = N.getOpcode() == PPCISD::MAT_PCREL_ADDR;
-  if (HasFlag || HasNode) {
+  bool ConstPool = ConstPoolNode &&
+                   ConstPoolNode->getTargetFlags() == PPCII::MO_PCREL_FLAG;
+  GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(N.getNode());
+  bool Global = GSDN && GSDN->getTargetFlags() == PPCII::MO_PCREL_FLAG;
+  if (ConstPool || Global) {
     Base = N;
     return true;
   }
@@ -2999,6 +3006,13 @@
   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
   // The actual address of the GlobalValue is stored in the TOC.
   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
+    if (!isAccessedAsGotIndirect(Op) && Subtarget.hasPCRelativeMemops()) {
+      LLVM_DEBUG(dbgs() << "PPCTargetLowering::getAddrMedian\n");
+      EVT Ty = getPointerTy(DAG.getDataLayout());
+      SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
+                                              PPCII::MO_PCREL_FLAG);
+      return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
+    }
     setUsesTOCBasePtr(DAG);
     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
     return getTOCEntry(DAG, DL, GA);
Index: llvm/lib/Target/PowerPC/PPCInstrInfo.td
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -973,6 +973,9 @@
 def xoaddr : ComplexPattern<iPTR, 2, "SelectAddrIdxOnly", [], []>;
 
+// PC Relative Address
+def pcreladdr : ComplexPattern<iPTR, 1, "SelectAddressPCRel", [], []>;
+
 // The address in a single register. This is used with the SjLj
 // pseudo-instructions.
 def addr : ComplexPattern<iPTR, 1, "SelectAddr", [], []>;
@@ -980,9 +983,6 @@
 /// This is just the offset part of iaddr, used for preinc.
 def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>;
 
-// PC Relative Address
-def pcreladdr : ComplexPattern<iPTR, 1, "SelectAddressPCRel", [], []>;
-
 //===----------------------------------------------------------------------===//
 // PowerPC Instruction Predicate Definitions.
 def In32BitMode : Predicate<"!PPCSubTarget->isPPC64()">;
Index: llvm/lib/Target/PowerPC/PPCInstrPrefix.td
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -347,30 +347,153 @@
 // added complexity. For the future we should refactor the addressing selection
 // on PowerPC so that this AddedComplexity=500 will not be required.
 let Predicates = [PCRelativeMemops], AddedComplexity = 500 in {
+  // Load i32
+  def : Pat<(i32 (zextloadi8 (MATpcreladdr pcreladdr:$ga))),
+            (PLBZpc $ga, 0)>;
+  def : Pat<(i32 (extloadi8 (MATpcreladdr pcreladdr:$ga))),
+            (PLBZpc $ga, 0)>;
+  def : Pat<(i32 (sextloadi16 (MATpcreladdr pcreladdr:$ga))),
+            (PLHApc $ga, 0)>;
+  def : Pat<(i32 (zextloadi16 (MATpcreladdr pcreladdr:$ga))),
+            (PLHZpc $ga, 0)>;
+  def : Pat<(i32 (extloadi16 (MATpcreladdr pcreladdr:$ga))),
+            (PLHZpc $ga, 0)>;
+  def : Pat<(i32 (load (MATpcreladdr pcreladdr:$ga))), (PLWZpc $ga, 0)>;
+
+  // Store i32
+  def : Pat<(truncstorei8 i32:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTBpc $RS, $ga, 0)>;
+  def : Pat<(truncstorei16 i32:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTHpc $RS, $ga, 0)>;
+  def : Pat<(store i32:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTWpc $RS, $ga, 0)>;
+
+  // Load i64
+  def : Pat<(i64 (zextloadi8 (MATpcreladdr pcreladdr:$ga))),
+            (PLBZ8pc $ga, 0)>;
+  def : Pat<(i64 (extloadi8 (MATpcreladdr pcreladdr:$ga))),
+            (PLBZ8pc $ga, 0)>;
+  def : Pat<(i64 (sextloadi16 (MATpcreladdr pcreladdr:$ga))),
+            (PLHA8pc $ga, 0)>;
+  def : Pat<(i64 (zextloadi16 (MATpcreladdr pcreladdr:$ga))),
+            (PLHZ8pc $ga, 0)>;
+  def : Pat<(i64 (extloadi16 (MATpcreladdr pcreladdr:$ga))),
+            (PLHZ8pc $ga, 0)>;
+  def : Pat<(i64 (zextloadi32 (MATpcreladdr pcreladdr:$ga))),
+            (PLWZ8pc $ga, 0)>;
+  def : Pat<(i64 (sextloadi32 (MATpcreladdr pcreladdr:$ga))),
+            (PLWA8pc $ga, 0)>;
+  def : Pat<(i64 (extloadi32 (MATpcreladdr pcreladdr:$ga))),
+            (PLWZ8pc $ga, 0)>;
+  def : Pat<(i64 (load (MATpcreladdr pcreladdr:$ga))), (PLDpc $ga, 0)>;
+
+  // Store i64
+  def : Pat<(truncstorei8 i64:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTB8pc $RS, $ga, 0)>;
+  def : Pat<(truncstorei16 i64:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTH8pc $RS, $ga, 0)>;
+  def : Pat<(truncstorei32 i64:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTW8pc $RS, $ga, 0)>;
+  def : Pat<(store i64:$RS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTDpc $RS, $ga, 0)>;
+
   // Load f32
   def : Pat<(f32 (load (MATpcreladdr pcreladdr:$cp))), (PLFSpc $cp, 0)>;
 
+  // Store f32
+  def : Pat<(store f32:$FRS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTFSpc $FRS, $ga, 0)>;
+
   // Load f64
   def : Pat<(f64 (extloadf32 (MATpcreladdr pcreladdr:$cp))),
             (COPY_TO_REGCLASS (PLFSpc $cp, 0), VSFRC)>;
   def : Pat<(f64 (load (MATpcreladdr pcreladdr:$cp))), (PLFDpc $cp, 0)>;
 
+  // Store f64
+  def : Pat<(store f64:$FRS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTFDpc $FRS, $ga, 0)>;
+
   // Load f128
   def : Pat<(f128 (load (MATpcreladdr pcreladdr:$cp))),
             (COPY_TO_REGCLASS (PLXVpc $cp, 0), VRRC)>;
 
+  // Store f128
+  def : Pat<(store f128:$XS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTXVpc (COPY_TO_REGCLASS $XS, VSRC), $ga, 0)>;
+
   // Load v4i32
   def : Pat<(v4i32 (load (MATpcreladdr pcreladdr:$cp))), (PLXVpc $cp, 0)>;
 
+  // Store v4i32
+  def : Pat<(store v4i32:$XS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTXVpc $XS, $ga, 0)>;
+
   // Load v2i64
   def : Pat<(v2i64 (load (MATpcreladdr pcreladdr:$cp))), (PLXVpc $cp, 0)>;
 
+  // Store v2i64
+  def : Pat<(store v2i64:$XS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTXVpc $XS, $ga, 0)>;
+
   // Load v4f32
   def : Pat<(v4f32 (load (MATpcreladdr pcreladdr:$cp))), (PLXVpc $cp, 0)>;
 
+  // Store v4f32
+  def : Pat<(store v4f32:$XS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTXVpc $XS, $ga, 0)>;
+
   // Load v2f64
   def : Pat<(v2f64 (load (MATpcreladdr pcreladdr:$cp))), (PLXVpc $cp, 0)>;
 
+  // Store v2f64
+  def : Pat<(store v2f64:$XS, (MATpcreladdr pcreladdr:$ga)),
+            (PSTXVpc $XS, $ga, 0)>;
+
+  // Atomic Load
+  def : Pat<(atomic_load_8 (MATpcreladdr pcreladdr:$ga)),
+            (PLBZpc $ga, 0)>;
+  def : Pat<(atomic_load_16 (MATpcreladdr pcreladdr:$ga)),
+            (PLHZpc $ga, 0)>;
+  def : Pat<(atomic_load_32 (MATpcreladdr pcreladdr:$ga)),
+            (PLWZpc $ga, 0)>;
+  def : Pat<(atomic_load_64 (MATpcreladdr pcreladdr:$ga)),
+            (PLDpc $ga, 0)>;
+
+  // Atomic Store
+  def : Pat<(atomic_store_8 (MATpcreladdr pcreladdr:$ga), i32:$RS),
+            (PSTBpc $RS, $ga, 0)>;
+  def : Pat<(atomic_store_16 (MATpcreladdr pcreladdr:$ga), i32:$RS),
+            (PSTHpc $RS, $ga, 0)>;
+  def : Pat<(atomic_store_32 (MATpcreladdr pcreladdr:$ga), i32:$RS),
+            (PSTWpc $RS, $ga, 0)>;
+  def : Pat<(atomic_store_8 (MATpcreladdr pcreladdr:$ga), i64:$RS),
+            (PSTB8pc $RS, $ga, 0)>;
+  def : Pat<(atomic_store_16 (MATpcreladdr pcreladdr:$ga), i64:$RS),
+            (PSTH8pc $RS, $ga, 0)>;
+  def : Pat<(atomic_store_32 (MATpcreladdr pcreladdr:$ga), i64:$RS),
+            (PSTW8pc $RS, $ga, 0)>;
+  def : Pat<(atomic_store_64 (MATpcreladdr pcreladdr:$ga), i64:$RS),
+            (PSTDpc $RS, $ga, 0)>;
+
+  // Special Cases For PPCstore_scal_int_from_vsr
+  def : Pat<(PPCstore_scal_int_from_vsr
+              (f64 (PPCcv_fp_to_sint_in_vsr f64:$src)),
+              (MATpcreladdr pcreladdr:$dst), 8),
+            (PSTXSDpc (XSCVDPSXDS f64:$src), $dst, 0)>;
+  def : Pat<(PPCstore_scal_int_from_vsr
+              (f64 (PPCcv_fp_to_sint_in_vsr f128:$src)),
+              (MATpcreladdr pcreladdr:$dst), 8),
+            (PSTXSDpc (COPY_TO_REGCLASS (XSCVQPSDZ f128:$src), VFRC), $dst, 0)>;
+
+  def : Pat<(PPCstore_scal_int_from_vsr
+              (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)),
+              (MATpcreladdr pcreladdr:$dst), 8),
+            (PSTXSDpc (XSCVDPUXDS f64:$src), $dst, 0)>;
+  def : Pat<(PPCstore_scal_int_from_vsr
+              (f64 (PPCcv_fp_to_uint_in_vsr f128:$src)),
+              (MATpcreladdr pcreladdr:$dst), 8),
+            (PSTXSDpc (COPY_TO_REGCLASS (XSCVQPUDZ f128:$src), VFRC), $dst, 0)>;
+
   // If the MATpcreladdr node is not caught by any other pattern it should be
   // caught here and turned into a paddi instruction to materialize the address.
def : Pat<(MATpcreladdr pcreladdr:$cp), (PADDI8pc 0, $cp)>; Index: llvm/test/CodeGen/PowerPC/csr-split.ll =================================================================== --- llvm/test/CodeGen/PowerPC/csr-split.ll +++ llvm/test/CodeGen/PowerPC/csr-split.ll @@ -12,7 +12,8 @@ define dso_local signext i32 @test1(i32* %b) local_unnamed_addr { ; CHECK-FUTURE-LABEL: test1: -; CHECK-FUTURE: # %bb.0: # %entry +; CHECK-FUTURE: .localentry test1, 1 +; CHECK-FUTURE-NEXT: # %bb.0: # %entry ; CHECK-FUTURE-NEXT: mflr r0 ; CHECK-FUTURE-NEXT: .cfi_def_cfa_offset 48 ; CHECK-FUTURE-NEXT: .cfi_offset lr, 16 @@ -20,15 +21,13 @@ ; CHECK-FUTURE-NEXT: std r30, -16(r1) # 8-byte Folded Spill ; CHECK-FUTURE-NEXT: std r0, 16(r1) ; CHECK-FUTURE-NEXT: stdu r1, -48(r1) -; CHECK-FUTURE-NEXT: addis r4, r2, a@toc@ha -; CHECK-FUTURE-NEXT: lwa r4, a@toc@l(r4) +; CHECK-FUTURE-NEXT: plwa r4, a@PCREL(0), 1 ; CHECK-FUTURE-NEXT: mr r30, r3 ; CHECK-FUTURE-NEXT: cmpld r4, r3 ; CHECK-FUTURE-NEXT: # implicit-def: $r3 ; CHECK-FUTURE-NEXT: bne cr0, .LBB0_2 ; CHECK-FUTURE-NEXT: # %bb.1: # %if.then -; CHECK-FUTURE-NEXT: bl callVoid -; CHECK-FUTURE-NEXT: nop +; CHECK-FUTURE-NEXT: bl callVoid@notoc ; CHECK-FUTURE-NEXT: mr r3, r30 ; CHECK-FUTURE-NEXT: bl callNonVoid@notoc ; CHECK-FUTURE-NEXT: .LBB0_2: # %if.end @@ -119,7 +118,8 @@ define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr { ; CHECK-FUTURE-LABEL: test2: -; CHECK-FUTURE: # %bb.0: # %entry +; CHECK-FUTURE: .localentry test2, 1 +; CHECK-FUTURE-NEXT: # %bb.0: # %entry ; CHECK-FUTURE-NEXT: mflr r0 ; CHECK-FUTURE-NEXT: .cfi_def_cfa_offset 48 ; CHECK-FUTURE-NEXT: .cfi_offset lr, 16 @@ -132,13 +132,11 @@ ; CHECK-FUTURE-NEXT: li r3, 0 ; CHECK-FUTURE-NEXT: beq cr0, .LBB1_3 ; CHECK-FUTURE-NEXT: # %bb.1: # %if.end -; CHECK-FUTURE-NEXT: addis r4, r2, a@toc@ha -; CHECK-FUTURE-NEXT: lwa r4, a@toc@l(r4) +; CHECK-FUTURE-NEXT: plwa r4, a@PCREL(0), 1 ; CHECK-FUTURE-NEXT: cmpld r4, r30 ; CHECK-FUTURE-NEXT: bne cr0, .LBB1_3 ; CHECK-FUTURE-NEXT: # %bb.2: # %if.then2 -; CHECK-FUTURE-NEXT: bl callVoid -; CHECK-FUTURE-NEXT: nop +; CHECK-FUTURE-NEXT: bl callVoid@notoc ; CHECK-FUTURE-NEXT: mr r3, r30 ; CHECK-FUTURE-NEXT: bl callNonVoid@notoc ; CHECK-FUTURE-NEXT: .LBB1_3: # %return Index: llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll @@ -0,0 +1,541 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -mcpu=future -enable-ppc-quad-precision -ppc-asm-full-reg-names \ +; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s + + @_ZL13StaticBoolVar = internal unnamed_addr global i8 0, align 1 +@_ZL19StaticSignedCharVar = internal unnamed_addr global i8 0, align 1 +@_ZL21StaticUnsignedCharVar = internal unnamed_addr global i8 0, align 1 +@_ZL20StaticSignedShortVar = internal unnamed_addr global i16 0, align 2 +@_ZL22StaticUnsignedShortVar = internal unnamed_addr global i16 0, align 2 +@_ZL18StaticSignedIntVar = internal unnamed_addr global i32 0, align 4 +@_ZL20StaticUnsignedIntVar = internal unnamed_addr global i32 0, align 4 +@_ZL19StaticSignedLongVar = internal unnamed_addr global i64 0, align 8 +@_ZL14StaticFloatVar = internal unnamed_addr global float 0.000000e+00, align 4 +@_ZL15StaticDoubleVar = internal unnamed_addr global double 0.000000e+00, align 8 +@_ZL19StaticLongDoubleVar = internal unnamed_addr global 
ppc_fp128 0xM00000000000000000000000000000000, align 16 +@_ZL23StaticSigned__Int128Var = internal unnamed_addr global i128 0, align 16 +@_ZL19Static__Float128Var = internal unnamed_addr global fp128 0xL00000000000000000000000000000000, align 16 +@_ZL25StaticVectorSignedCharVar = internal unnamed_addr global <16 x i8> zeroinitializer, align 16 +@_ZL26StaticVectorSignedShortVar = internal unnamed_addr global <8 x i16> zeroinitializer, align 16 +@_ZL24StaticVectorSignedIntVar = internal unnamed_addr global <4 x i32> zeroinitializer, align 16 +@_ZL29StaticVectorSignedLongLongVar = internal unnamed_addr global <2 x i64> zeroinitializer, align 16 +@_ZL29StaticVectorSigned__Int128Var = internal unnamed_addr global <1 x i128> zeroinitializer, align 16 +@_ZL20StaticVectorFloatVar = internal unnamed_addr global <4 x float> zeroinitializer, align 16 +@_ZL21StaticVectorDoubleVar = internal unnamed_addr global <2 x double> zeroinitializer, align 16 + + define zeroext i1 @_Z17ReadStaticBoolVarv() { +; CHECK-LABEL: _Z17ReadStaticBoolVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plbz r3, _ZL13StaticBoolVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i8, i8* @_ZL13StaticBoolVar, align 1, !range !0 + %tobool = icmp ne i8 %0, 0 + ret i1 %tobool +} + + define signext i8 @_Z23ReadStaticSignedCharVarv() { +; CHECK-LABEL: _Z23ReadStaticSignedCharVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plbz r3, _ZL19StaticSignedCharVar@PCREL(0), 1 +; CHECK-NEXT: extsb r3, r3 +; CHECK-NEXT: blr +entry: + %0 = load i8, i8* @_ZL19StaticSignedCharVar, align 1 + ret i8 %0 +} + + define zeroext i8 @_Z25ReadStaticUnsignedCharVarv() { +; CHECK-LABEL: _Z25ReadStaticUnsignedCharVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plbz r3, _ZL21StaticUnsignedCharVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i8, i8* @_ZL21StaticUnsignedCharVar, align 1 + ret i8 %0 +} + + define signext i16 @_Z24ReadStaticSignedShortVarv() { +; CHECK-LABEL: _Z24ReadStaticSignedShortVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plha r3, _ZL20StaticSignedShortVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i16, i16* @_ZL20StaticSignedShortVar, align 2 + ret i16 %0 +} + + define zeroext i16 @_Z26ReadStaticUnsignedShortVarv() { +; CHECK-LABEL: _Z26ReadStaticUnsignedShortVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plhz r3, _ZL22StaticUnsignedShortVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i16, i16* @_ZL22StaticUnsignedShortVar, align 2 + ret i16 %0 +} + + define signext i32 @_Z22ReadStaticSignedIntVarv() { +; CHECK-LABEL: _Z22ReadStaticSignedIntVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plwa r3, _ZL18StaticSignedIntVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i32, i32* @_ZL18StaticSignedIntVar, align 4 + ret i32 %0 +} + + define zeroext i32 @_Z24ReadStaticUnsignedIntVarv() { +; CHECK-LABEL: _Z24ReadStaticUnsignedIntVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plwz r3, _ZL20StaticUnsignedIntVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i32, i32* @_ZL20StaticUnsignedIntVar, align 4 + ret i32 %0 +} + + ; It is the same as unsigned long version +define i64 @_Z23ReadStaticSignedLongVarv() { +; CHECK-LABEL: _Z23ReadStaticSignedLongVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pld r3, _ZL19StaticSignedLongVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i64, i64* @_ZL19StaticSignedLongVar, align 8 + ret i64 %0 +} + + define float @_Z18ReadStaticFloatVarv() { +; CHECK-LABEL: _Z18ReadStaticFloatVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plfs f1, _ZL14StaticFloatVar@PCREL(0), 1 +; 
CHECK-NEXT: blr +entry: + %0 = load float, float* @_ZL14StaticFloatVar, align 4 + ret float %0 +} + + define double @_Z19ReadStaticDoubleVarv() { +; CHECK-LABEL: _Z19ReadStaticDoubleVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plfd f1, _ZL15StaticDoubleVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load double, double* @_ZL15StaticDoubleVar, align 8 + ret double %0 +} + + ; FIXME: +define ppc_fp128 @_Z23ReadStaticLongDoubleVarv() { +; CHECK-LABEL: _Z23ReadStaticLongDoubleVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL19StaticLongDoubleVar@PCREL, 1 +; CHECK-NEXT: lfd f2, 8(r3) +; CHECK-NEXT: plfd f1, _ZL19StaticLongDoubleVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load ppc_fp128, ppc_fp128* @_ZL19StaticLongDoubleVar, align 16 + ret ppc_fp128 %0 +} + + ; FIXME: +define i128 @_Z27ReadStaticSigned__Int128Varv() { +; CHECK-LABEL: _Z27ReadStaticSigned__Int128Varv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL23StaticSigned__Int128Var@PCREL, 1 +; CHECK-NEXT: ld r4, 8(r3) +; CHECK-NEXT: pld r3, _ZL23StaticSigned__Int128Var@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load i128, i128* @_ZL23StaticSigned__Int128Var, align 16 + ret i128 %0 +} + + define fp128 @_Z23ReadStatic__Float128Varv() { +; CHECK-LABEL: _Z23ReadStatic__Float128Varv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL19Static__Float128Var@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load fp128, fp128* @_ZL19Static__Float128Var, align 16 + ret fp128 %0 +} + + define <16 x i8> @_Z29ReadStaticVectorSignedCharVarv() { +; CHECK-LABEL: _Z29ReadStaticVectorSignedCharVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL25StaticVectorSignedCharVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <16 x i8>, <16 x i8>* @_ZL25StaticVectorSignedCharVar, align 16 + ret <16 x i8> %0 +} + + define <8 x i16> @_Z30ReadStaticVectorSignedShortVarv() { +; CHECK-LABEL: _Z30ReadStaticVectorSignedShortVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL26StaticVectorSignedShortVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <8 x i16>, <8 x i16>* @_ZL26StaticVectorSignedShortVar, align 16 + ret <8 x i16> %0 +} + + define <4 x i32> @_Z28ReadStaticVectorSignedIntVarv() { +; CHECK-LABEL: _Z28ReadStaticVectorSignedIntVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL24StaticVectorSignedIntVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <4 x i32>, <4 x i32>* @_ZL24StaticVectorSignedIntVar, align 16 + ret <4 x i32> %0 +} + + define <2 x i64> @_Z33ReadStaticVectorSignedLongLongVarv() { +; CHECK-LABEL: _Z33ReadStaticVectorSignedLongLongVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL29StaticVectorSignedLongLongVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <2 x i64>, <2 x i64>* @_ZL29StaticVectorSignedLongLongVar, align 16 + ret <2 x i64> %0 +} + + define <1 x i128> @_Z33ReadStaticVectorSigned__Int128Varv() { +; CHECK-LABEL: _Z33ReadStaticVectorSigned__Int128Varv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL29StaticVectorSigned__Int128Var@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <1 x i128>, <1 x i128>* @_ZL29StaticVectorSigned__Int128Var, align 16 + ret <1 x i128> %0 +} + + define <4 x float> @_Z24ReadStaticVectorFloatVarv() { +; CHECK-LABEL: _Z24ReadStaticVectorFloatVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL20StaticVectorFloatVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <4 x float>, <4 x float>* @_ZL20StaticVectorFloatVar, align 16 + ret <4 x float> %0 +} + + define <2 x double> 
@_Z25ReadStaticVectorDoubleVarv() { +; CHECK-LABEL: _Z25ReadStaticVectorDoubleVarv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: plxv v2, _ZL21StaticVectorDoubleVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %0 = load <2 x double>, <2 x double>* @_ZL21StaticVectorDoubleVar, align 16 + ret <2 x double> %0 +} + + !0 = !{i8 0, i8 2} + + define void @_Z18WriteStaticBoolVarb(i1 zeroext %val) { +; CHECK-LABEL: _Z18WriteStaticBoolVarb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstb r3, _ZL13StaticBoolVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + %frombool = zext i1 %val to i8 + store i8 %frombool, i8* @_ZL13StaticBoolVar, align 1 + ret void +} + + define void @_Z24WriteStaticSignedCharVara(i8 signext %val) { +; CHECK-LABEL: _Z24WriteStaticSignedCharVara: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstb r3, _ZL19StaticSignedCharVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i8 %val, i8* @_ZL19StaticSignedCharVar, align 1 + ret void +} + + define void @_Z26WriteStaticUnsignedCharVarh(i8 zeroext %val){ +; CHECK-LABEL: _Z26WriteStaticUnsignedCharVarh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstb r3, _ZL21StaticUnsignedCharVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i8 %val, i8* @_ZL21StaticUnsignedCharVar, align 1 + ret void +} + + define void @_Z25WriteStaticSignedShortVars(i16 signext %val) { +; CHECK-LABEL: _Z25WriteStaticSignedShortVars: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: psth r3, _ZL20StaticSignedShortVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i16 %val, i16* @_ZL20StaticSignedShortVar, align 2 + ret void +} + + define void @_Z27WriteStaticUnsignedShortVart(i16 zeroext %val) { +; CHECK-LABEL: _Z27WriteStaticUnsignedShortVart: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: psth r3, _ZL22StaticUnsignedShortVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i16 %val, i16* @_ZL22StaticUnsignedShortVar, align 2 + ret void +} + + define void @_Z23WriteStaticSignedIntVari(i32 signext %val) { +; CHECK-LABEL: _Z23WriteStaticSignedIntVari: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstw r3, _ZL18StaticSignedIntVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i32 %val, i32* @_ZL18StaticSignedIntVar, align 4 + ret void +} + + define void @_Z25WriteStaticUnsignedIntVarj(i32 zeroext %val) { +; CHECK-LABEL: _Z25WriteStaticUnsignedIntVarj: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstw r3, _ZL20StaticUnsignedIntVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i32 %val, i32* @_ZL20StaticUnsignedIntVar, align 4 + ret void +} + + define void @_Z24WriteStaticSignedLongVarl(i64 %val) { +; CHECK-LABEL: _Z24WriteStaticSignedLongVarl: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstd r3, _ZL19StaticSignedLongVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i64 %val, i64* @_ZL19StaticSignedLongVar, align 8 + ret void +} + + define void @_Z19WriteStaticFloatVarf(float %val) { +; CHECK-LABEL: _Z19WriteStaticFloatVarf: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstfs f1, _ZL14StaticFloatVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store float %val, float* @_ZL14StaticFloatVar, align 4 + ret void +} + + define void @_Z20WriteStaticDoubleVard(double %val) { +; CHECK-LABEL: _Z20WriteStaticDoubleVard: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstfd f1, _ZL15StaticDoubleVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store double %val, double* @_ZL15StaticDoubleVar, align 8 + ret void +} + + ; FIXME: +define void @_Z24WriteStaticLongDoubleVarg(ppc_fp128 %val) { +; CHECK-LABEL: _Z24WriteStaticLongDoubleVarg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL19StaticLongDoubleVar@PCREL, 1 +; 
CHECK-NEXT: stfd f2, 8(r3) +; CHECK-NEXT: pstfd f1, _ZL19StaticLongDoubleVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store ppc_fp128 %val, ppc_fp128* @_ZL19StaticLongDoubleVar, align 16 + ret void +} + + ; FIXME: +define void @_Z28WriteStaticSigned__Int128Varn(i128 %val) { +; CHECK-LABEL: _Z28WriteStaticSigned__Int128Varn: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r5, 0, _ZL23StaticSigned__Int128Var@PCREL, 1 +; CHECK-NEXT: std r4, 8(r5) +; CHECK-NEXT: pstd r3, _ZL23StaticSigned__Int128Var@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store i128 %val, i128* @_ZL23StaticSigned__Int128Var, align 16 + ret void +} + + define void @_Z24WriteStatic__Float128Varu9__ieee128(fp128 %val) { +; CHECK-LABEL: _Z24WriteStatic__Float128Varu9__ieee128: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL19Static__Float128Var@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store fp128 %val, fp128* @_ZL19Static__Float128Var, align 16 + ret void +} + + define void @_Z30WriteStaticVectorSignedCharVarDv16_a(<16 x i8> %val) { +; CHECK-LABEL: _Z30WriteStaticVectorSignedCharVarDv16_a: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL25StaticVectorSignedCharVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <16 x i8> %val, <16 x i8>* @_ZL25StaticVectorSignedCharVar, align 16 + ret void +} + + define void @_Z31WriteStaticVectorSignedShortVarDv8_s(<8 x i16> %val) { +; CHECK-LABEL: _Z31WriteStaticVectorSignedShortVarDv8_s: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL26StaticVectorSignedShortVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <8 x i16> %val, <8 x i16>* @_ZL26StaticVectorSignedShortVar, align 16 + ret void +} + + define void @_Z29WriteStaticVectorSignedIntVarDv4_i(<4 x i32> %val) { +; CHECK-LABEL: _Z29WriteStaticVectorSignedIntVarDv4_i: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL24StaticVectorSignedIntVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <4 x i32> %val, <4 x i32>* @_ZL24StaticVectorSignedIntVar, align 16 + ret void +} + + define void @_Z34WriteStaticVectorSignedLongLongVarDv2_x(<2 x i64> %val) { +; CHECK-LABEL: _Z34WriteStaticVectorSignedLongLongVarDv2_x: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL29StaticVectorSignedLongLongVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <2 x i64> %val, <2 x i64>* @_ZL29StaticVectorSignedLongLongVar, align 16 + ret void +} + + define void @_Z34WriteStaticVectorSigned__Int128VarDv1_n(<1 x i128> %val) { +; CHECK-LABEL: _Z34WriteStaticVectorSigned__Int128VarDv1_n: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL29StaticVectorSigned__Int128Var@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <1 x i128> %val, <1 x i128>* @_ZL29StaticVectorSigned__Int128Var, align 16 + ret void +} + + define void @_Z25WriteStaticVectorFloatVarDv4_f(<4 x float> %val) { +; CHECK-LABEL: _Z25WriteStaticVectorFloatVarDv4_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL20StaticVectorFloatVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <4 x float> %val, <4 x float>* @_ZL20StaticVectorFloatVar, align 16 + ret void +} + + define void @_Z26WriteStaticVectorDoubleVarDv2_d(<2 x double> %val) { +; CHECK-LABEL: _Z26WriteStaticVectorDoubleVarDv2_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pstxv v2, _ZL21StaticVectorDoubleVar@PCREL(0), 1 +; CHECK-NEXT: blr +entry: + store <2 x double> %val, <2 x double>* @_ZL21StaticVectorDoubleVar, align 16 + ret void +} + + @_ZL3ptr = internal unnamed_addr global i32* null, align 8 +define void @_Z14WriteStaticPtrv() { +; CHECK-LABEL: _Z14WriteStaticPtrv: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: pld r3, _ZL3ptr@PCREL(0), 1 +; CHECK-NEXT: li r4, 3 +; CHECK-NEXT: stw r4, 0(r3) +; CHECK-NEXT: blr +entry: + %0 = load i32*, i32** @_ZL3ptr, align 8 + store i32 3, i32* %0, align 4 + ret void +} + + @.str = private unnamed_addr constant [13 x i8] c"Hello World\0A\00", align 1 +@str = dso_local local_unnamed_addr global i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i64 0, i64 0), align 8 + + define zeroext i8 @_Z17Char0InStrLiteralv() { +; CHECK-LABEL: _Z17Char0InStrLiteralv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pld r3, str@PCREL(0), 1 +; CHECK-NEXT: lbz r3, 0(r3) +; CHECK-NEXT: blr +entry: + %0 = load i8*, i8** @str, align 8 + %1 = load i8, i8* %0, align 1 + ret i8 %1 +} + + define zeroext i8 @_Z17Char3InStrLiteralv() { +; CHECK-LABEL: _Z17Char3InStrLiteralv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pld r3, str@PCREL(0), 1 +; CHECK-NEXT: lbz r3, 3(r3) +; CHECK-NEXT: blr +entry: + %0 = load i8*, i8** @str, align 8 + %arrayidx = getelementptr inbounds i8, i8* %0, i64 3 + %1 = load i8, i8* %arrayidx, align 1 + ret i8 %1 +} + + @_ZL5array = internal global [10 x i32] zeroinitializer, align 4 + + ; FIXME: +define signext i32 @_Z15ReadStaticArrayv() { +; CHECK-LABEL: _Z15ReadStaticArrayv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL5array@PCREL, 1 +; CHECK-NEXT: lwa r3, 12(r3) +; CHECK-NEXT: blr +entry: + %0 = load i32, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @_ZL5array, i64 0, i64 3), align 4 + ret i32 %0 +} + + ; FIXME: +define void @_Z16WriteStaticArrayv() { +; CHECK-LABEL: _Z16WriteStaticArrayv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL5array@PCREL, 1 +; CHECK-NEXT: li r4, 5 +; CHECK-NEXT: stw r4, 12(r3) +; CHECK-NEXT: blr +entry: + store i32 5, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @_ZL5array, i64 0, i64 3), align 4 + ret void +} + + %struct.Struct = type { i8, i16, i32 } + + ; FIXME: +@_ZL9structure = internal global %struct.Struct zeroinitializer, align 4 +define signext i32 @_Z16ReadStaticStructv() { +; CHECK-LABEL: _Z16ReadStaticStructv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL9structure@PCREL, 1 +; CHECK-NEXT: lwa r3, 4(r3) +; CHECK-NEXT: blr +entry: + %0 = load i32, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @_ZL9structure, i64 0, i32 2), align 4 + ret i32 %0 +} + + ; FIXME +define void @_Z17WriteStaticStructv() { +; CHECK-LABEL: _Z17WriteStaticStructv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: paddi r3, 0, _ZL9structure@PCREL, 1 +; CHECK-NEXT: li r4, 3 +; CHECK-NEXT: stw r4, 4(r3) +; CHECK-NEXT: blr +entry: + store i32 3, i32* getelementptr inbounds (%struct.Struct, %struct.Struct* @_ZL9structure, i64 0, i32 2), align 4 + ret void +} +
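
For reference, a minimal standalone example (hypothetical file and symbol names, not taken from the patch) of the codegen change being made here: with -mcpu=future, a direct access to a global that is not GOT-indirect should be selected through PPCISD::MAT_PCREL_ADDR into a single prefixed PC-relative load, instead of the addis/lwa TOC sequence visible in the old csr-split.ll checks. The expected plwa form below mirrors the _Z22ReadStaticSignedIntVarv check in the new test and is a sketch, not verified output.

; Sketch only; expected asm is inferred from the checks above.
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=future -ppc-asm-full-reg-names < %s | FileCheck %s

@GlobalInt = internal global i32 0, align 4

define signext i32 @readGlobalInt() {
; CHECK-LABEL: readGlobalInt:
; CHECK:       plwa r3, GlobalInt@PCREL(0), 1
; CHECK-NEXT:  blr
entry:
  %0 = load i32, i32* @GlobalInt, align 4
  ret i32 %0
}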