diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2069,13 +2069,14 @@
   // slot for dynamic stack allocations.

   // The scavenger might be invoked if the frame offset does not fit into
-  // the 16-bit immediate. We don't know the complete frame size here
-  // because we've not yet computed callee-saved register spills or the
-  // needed alignment padding.
+  // the 16-bit immediate (or 8-bit in the case of SPE). We don't know the
+  // complete frame size here because we've not yet computed callee-saved
+  // register spills or the needed alignment padding.
   unsigned StackSize = determineFrameLayout(MF, true);
   MachineFrameInfo &MFI = MF.getFrameInfo();
   if (MFI.hasVarSizedObjects() || spillsCR(MF) || spillsVRSAVE(MF) ||
-      hasNonRISpills(MF) || (hasSpills(MF) && !isInt<16>(StackSize))) {
+      hasNonRISpills(MF) || (hasSpills(MF) && !isInt<16>(StackSize)) ||
+      (Subtarget.hasSPE() && hasSpills(MF) && !isInt<8>(StackSize))) {
     const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
     const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
     const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
diff --git a/llvm/test/CodeGen/PowerPC/spe-spills.ll b/llvm/test/CodeGen/PowerPC/spe-spills.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/spe-spills.ll
@@ -0,0 +1,460 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpcspe-unknown-linux-gnu \
+; RUN:   -mattr=+spe | FileCheck %s
+; Test that two or more spill slots are allocated for stacks larger than 256
+; bytes on powerpcspe targets
+%struct.gammapweights = type { %struct.anon, %struct.anon.0, %struct.anon.1, %struct.anon.2, %struct.anon.3, %struct.anon.4 }
+%struct.anon = type { %struct.g, double, double }
+%struct.g = type { double, double, double }
+%struct.anon.0 = type { double, double, double }
+%struct.anon.1 = type { double, double, double, double, double, double, double, double, double }
+%struct.anon.2 = type { double, double, double }
+%struct.anon.3 = type { double, double }
+%struct.anon.4 = type { double }
+
+; Function Attrs: nofree norecurse nounwind uwtable
+define dso_local i32 @r(%struct.gammapweights* %dst, %struct.gammapweights* nocapture readonly %s, double %t, %struct.gammapweights* readonly %u, double %v) local_unnamed_addr #0 {
+; CHECK-LABEL: r:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: stwu 1, -144(1)
+; CHECK-NEXT: .cfi_def_cfa_offset 144
+; CHECK-NEXT: .cfi_offset r22, -40
+; CHECK-NEXT: .cfi_offset r23, -36
+; CHECK-NEXT: .cfi_offset r24, -32
+; CHECK-NEXT: .cfi_offset r25, -28
+; CHECK-NEXT: .cfi_offset r26, -24
+; CHECK-NEXT: .cfi_offset r27, -20
+; CHECK-NEXT: .cfi_offset r28, -16
+; CHECK-NEXT: .cfi_offset r29, -12
+; CHECK-NEXT: .cfi_offset r30, -8
+; CHECK-NEXT: .cfi_offset r22, -128
+; CHECK-NEXT: .cfi_offset r23, -120
+; CHECK-NEXT: .cfi_offset r24, -112
+; CHECK-NEXT: .cfi_offset r25, -104
+; CHECK-NEXT: .cfi_offset r26, -96
+; CHECK-NEXT: .cfi_offset r27, -88
+; CHECK-NEXT: .cfi_offset r28, -80
+; CHECK-NEXT: .cfi_offset r29, -72
+; CHECK-NEXT: .cfi_offset r30, -64
+; CHECK-NEXT: li 0, 16
+; CHECK-NEXT: stw 22, 104(1) # 4-byte Folded Spill
+; CHECK-NEXT: li 11, 8
+; CHECK-NEXT: stw 23, 108(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 24, 112(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 25, 116(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 26, 120(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 27, 124(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 28, 128(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 29, 132(1) # 4-byte Folded Spill
+; CHECK-NEXT: stw 30, 136(1) # 4-byte Folded Spill
+; CHECK-NEXT: evstdd 22, 16(1) # 8-byte Folded Spill
+; CHECK-NEXT: evstdd 23, 24(1) # 8-byte Folded Spill
+; CHECK-NEXT: li 23, 48
+; CHECK-NEXT: evstdd 24, 32(1) # 8-byte Folded Spill
+; CHECK-NEXT: evstdd 25, 40(1) # 8-byte Folded Spill
+; CHECK-NEXT: li 25, 40
+; CHECK-NEXT: evstdd 26, 48(1) # 8-byte Folded Spill
+; CHECK-NEXT: evstdd 27, 56(1) # 8-byte Folded Spill
+; CHECK-NEXT: li 27, 32
+; CHECK-NEXT: evstdd 28, 64(1) # 8-byte Folded Spill
+; CHECK-NEXT: evstdd 29, 72(1) # 8-byte Folded Spill
+; CHECK-NEXT: li 29, 24
+; CHECK-NEXT: evstdd 30, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: evmergelo 8, 9, 10
+; CHECK-NEXT: evmergelo 5, 5, 6
+; CHECK-NEXT: evlddx 30, 7, 0
+; CHECK-NEXT: evlddx 10, 4, 11
+; CHECK-NEXT: evlddx 12, 7, 11
+; CHECK-NEXT: efdmul 30, 30, 8
+; CHECK-NEXT: evlddx 28, 7, 29
+; CHECK-NEXT: efdmul 10, 10, 5
+; CHECK-NEXT: evlddx 26, 7, 27
+; CHECK-NEXT: efdmul 10, 10, 12
+; CHECK-NEXT: evlddx 22, 4, 23
+; CHECK-NEXT: efdmul 12, 28, 8
+; CHECK-NEXT: evstddx 30, 3, 0
+; CHECK-NEXT: efdadd 12, 12, 5
+; CHECK-NEXT: evlddx 0, 7, 23
+; CHECK-NEXT: evlddx 24, 7, 25
+; CHECK-NEXT: evstddx 12, 3, 29
+; CHECK-NEXT: efdmul 29, 26, 8
+; CHECK-NEXT: efdmul 26, 22, 5
+; CHECK-NEXT: efdmul 0, 0, 8
+; CHECK-NEXT: evstddx 10, 3, 11
+; CHECK-NEXT: li 10, 64
+; CHECK-NEXT: efdadd 29, 29, 5
+; CHECK-NEXT: efdmul 30, 24, 8
+; CHECK-NEXT: evlddx 11, 4, 10
+; CHECK-NEXT: efdadd 0, 26, 0
+; CHECK-NEXT: evlddx 12, 7, 10
+; CHECK-NEXT: evstddx 29, 3, 27
+; CHECK-NEXT: li 29, 72
+; CHECK-NEXT: evstddx 30, 3, 25
+; CHECK-NEXT: li 30, 56
+; CHECK-NEXT: evlddx 28, 4, 29
+; CHECK-NEXT: efdmul 11, 11, 5
+; CHECK-NEXT: efdmul 12, 12, 8
+; CHECK-NEXT: evlddx 27, 7, 29
+; CHECK-NEXT: efdadd 11, 11, 12
+; CHECK-NEXT: evstddx 0, 3, 23
+; CHECK-NEXT: li 0, 80
+; CHECK-NEXT: evlddx 25, 4, 30
+; CHECK-NEXT: efdmul 28, 28, 5
+; CHECK-NEXT: efdmul 27, 27, 8
+; CHECK-NEXT: evlddx 24, 7, 30
+; CHECK-NEXT: efdadd 28, 28, 27
+; CHECK-NEXT: evlddx 26, 4, 0
+; CHECK-NEXT: efdmul 25, 25, 5
+; CHECK-NEXT: evlddx 23, 7, 0
+; CHECK-NEXT: efdmul 24, 24, 8
+; CHECK-NEXT: evstddx 11, 3, 10
+; CHECK-NEXT: li 10, 96
+; CHECK-NEXT: efdmul 26, 26, 5
+; CHECK-NEXT: efdmul 23, 23, 8
+; CHECK-NEXT: efdadd 25, 25, 24
+; CHECK-NEXT: evlddx 11, 4, 10
+; CHECK-NEXT: efdadd 26, 26, 23
+; CHECK-NEXT: evlddx 12, 7, 10
+; CHECK-NEXT: evstddx 28, 3, 29
+; CHECK-NEXT: li 29, 104
+; CHECK-NEXT: evstddx 25, 3, 30
+; CHECK-NEXT: li 30, 88
+; CHECK-NEXT: evlddx 28, 4, 29
+; CHECK-NEXT: efdmul 11, 11, 5
+; CHECK-NEXT: efdmul 12, 12, 8
+; CHECK-NEXT: evlddx 27, 7, 29
+; CHECK-NEXT: efdadd 11, 11, 12
+; CHECK-NEXT: evstddx 26, 3, 0
+; CHECK-NEXT: li 0, 112
+; CHECK-NEXT: evlddx 25, 4, 30
+; CHECK-NEXT: efdmul 28, 28, 5
+; CHECK-NEXT: efdmul 27, 27, 8
+; CHECK-NEXT: evlddx 24, 7, 30
+; CHECK-NEXT: efdadd 28, 28, 27
+; CHECK-NEXT: evlddx 26, 4, 0
+; CHECK-NEXT: efdmul 25, 25, 5
+; CHECK-NEXT: evlddx 23, 7, 0
+; CHECK-NEXT: efdmul 24, 24, 8
+; CHECK-NEXT: evstddx 11, 3, 10
+; CHECK-NEXT: li 10, 128
+; CHECK-NEXT: efdmul 26, 26, 5
+; CHECK-NEXT: efdmul 23, 23, 8
+; CHECK-NEXT: efdadd 25, 25, 24
+; CHECK-NEXT: evlddx 11, 4, 10
+; CHECK-NEXT: efdadd 26, 26, 23
+; CHECK-NEXT: evlddx 12, 7, 10
+; CHECK-NEXT: evstddx 28, 3, 29
+; CHECK-NEXT: li 29, 136
+; CHECK-NEXT: evstddx 25, 3, 30
+; CHECK-NEXT: li 30, 120
+; CHECK-NEXT: evlddx 28, 4, 29
+; CHECK-NEXT: efdmul 11, 11, 5
+; CHECK-NEXT: efdmul 12, 12, 8
+; CHECK-NEXT: evlddx 27, 7, 29
+; CHECK-NEXT: efdadd 11, 11, 12
+; CHECK-NEXT: evstddx 26, 3, 0
+; CHECK-NEXT: li 0, 144
+; CHECK-NEXT: evlddx 25, 4, 30
+; CHECK-NEXT: efdmul 28, 28, 5
+; CHECK-NEXT: efdmul 27, 27, 8
+; CHECK-NEXT: evlddx 24, 7, 30
+; CHECK-NEXT: efdadd 28, 28, 27
+; CHECK-NEXT: evlddx 26, 4, 0
+; CHECK-NEXT: efdmul 25, 25, 5
+; CHECK-NEXT: evlddx 23, 7, 0
+; CHECK-NEXT: efdmul 24, 24, 8
+; CHECK-NEXT: evstddx 11, 3, 10
+; CHECK-NEXT: li 10, 160
+; CHECK-NEXT: efdmul 27, 26, 5
+; CHECK-NEXT: efdmul 26, 23, 8
+; CHECK-NEXT: efdadd 25, 25, 24
+; CHECK-NEXT: evlddx 11, 4, 10
+; CHECK-NEXT: efdadd 27, 27, 26
+; CHECK-NEXT: evlddx 12, 7, 10
+; CHECK-NEXT: evstddx 25, 3, 30
+; CHECK-NEXT: li 30, 152
+; CHECK-NEXT: evstddx 28, 3, 29
+; CHECK-NEXT: li 29, 168
+; CHECK-NEXT: evstddx 27, 3, 0
+; CHECK-NEXT: li 27, 176
+; CHECK-NEXT: evldd 9, 0(4)
+; CHECK-NEXT: efdmul 11, 11, 5
+; CHECK-NEXT: efdmul 12, 12, 8
+; CHECK-NEXT: evldd 6, 0(7)
+; CHECK-NEXT: efdadd 11, 11, 12
+; CHECK-NEXT: evlddx 25, 4, 30
+; CHECK-NEXT: efdmul 9, 9, 5
+; CHECK-NEXT: evlddx 24, 7, 30
+; CHECK-NEXT: efdmul 6, 9, 6
+; CHECK-NEXT: evlddx 28, 4, 29
+; CHECK-NEXT: efdmul 26, 25, 5
+; CHECK-NEXT: evlddx 0, 7, 29
+; CHECK-NEXT: efdmul 25, 24, 8
+; CHECK-NEXT: evlddx 4, 4, 27
+; CHECK-NEXT: efdadd 26, 26, 25
+; CHECK-NEXT: evlddx 7, 7, 27
+; CHECK-NEXT: evstddx 11, 3, 10
+; CHECK-NEXT: efdmul 10, 28, 5
+; CHECK-NEXT: efdmul 4, 4, 5
+; CHECK-NEXT: efdmul 5, 0, 8
+; CHECK-NEXT: efdmul 7, 7, 8
+; CHECK-NEXT: efdadd 5, 10, 5
+; CHECK-NEXT: efdadd 4, 4, 7
+; CHECK-NEXT: evstddx 26, 3, 30
+; CHECK-NEXT: evstddx 5, 3, 29
+; CHECK-NEXT: evstddx 4, 3, 27
+; CHECK-NEXT: evldd 30, 80(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 29, 72(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 28, 64(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 27, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 26, 48(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 25, 40(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 24, 32(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 23, 24(1) # 8-byte Folded Reload
+; CHECK-NEXT: evldd 22, 16(1) # 8-byte Folded Reload
+; CHECK-NEXT: evstdd 6, 0(3)
+; CHECK-NEXT: lwz 30, 136(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 29, 132(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 28, 128(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 27, 124(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 26, 120(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 25, 116(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 24, 112(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 23, 108(1) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 22, 104(1) # 4-byte Folded Reload
+; CHECK-NEXT: addi 1, 1, 144
+; CHECK-NEXT: blr
+entry:
+  %b = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 0, i32 0, i32 0
+  %0 = load double, double* %b, align 8, !tbaa !2
+  %mul = fmul double %0, %t
+  %b3 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 0, i32 0, i32 0
+  %1 = load double, double* %b3, align 8, !tbaa !2
+  %mul4 = fmul double %mul, %1
+  %b7 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 0, i32 0, i32 0
+  store double %mul4, double* %b7, align 8, !tbaa !2
+  %c10 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 0, i32 0, i32 1
+  %2 = load double, double* %c10, align 8, !tbaa !12
+  %mul11 = fmul double %2, %t
+  %c14 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 0, i32 0, i32 1
+  %3 = load double, double* %c14, align 8, !tbaa !12
+  %mul15 = fmul double %mul11, %3
+  %c18 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 0, i32 0, i32 1
+  store double %mul15, double* %c18, align 8, !tbaa !12
+  %e = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 0, i32 0, i32 2
+  %4 = load double, double* %e, align 8, !tbaa !13
+  %mul21 = fmul double %4, %v
+  %e24 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 0, i32 0, i32 2
+  store double %mul21, double* %e24, align 8, !tbaa !13
+  %i = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 0, i32 1
+  %5 = load double, double* %i, align 8, !tbaa !14
+  %mul26 = fmul double %5, %v
+  %add = fadd double %mul26, %t
+  %i28 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 0, i32 1
+  store double %add, double* %i28, align 8, !tbaa !14
+  %cx = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 0, i32 2
+  %6 = load double, double* %cx, align 8, !tbaa !15
+  %mul30 = fmul double %6, %v
+  %add31 = fadd double %mul30, %t
+  %cx33 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 0, i32 2
+  store double %add31, double* %cx33, align 8, !tbaa !15
+  %j = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 1, i32 0
+  %7 = load double, double* %j, align 8, !tbaa !16
+  %mul35 = fmul double %7, %v
+  %j37 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 1, i32 0
+  store double %mul35, double* %j37, align 8, !tbaa !16
+  %e39 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 1, i32 1
+  %8 = load double, double* %e39, align 8, !tbaa !17
+  %mul40 = fmul double %8, %t
+  %e42 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 1, i32 1
+  %9 = load double, double* %e42, align 8, !tbaa !17
+  %mul43 = fmul double %9, %v
+  %add44 = fadd double %mul40, %mul43
+  %e46 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 1, i32 1
+  store double %add44, double* %e46, align 8, !tbaa !17
+  %b48 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 1, i32 2
+  %10 = load double, double* %b48, align 8, !tbaa !18
+  %mul49 = fmul double %10, %t
+  %b51 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 1, i32 2
+  %11 = load double, double* %b51, align 8, !tbaa !18
+  %mul52 = fmul double %11, %v
+  %add53 = fadd double %mul49, %mul52
+  %b55 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 1, i32 2
+  store double %add53, double* %b55, align 8, !tbaa !18
+  %j56 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 0
+  %12 = load double, double* %j56, align 8, !tbaa !19
+  %mul57 = fmul double %12, %t
+  %j59 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 0
+  %13 = load double, double* %j59, align 8, !tbaa !19
+  %mul60 = fmul double %13, %v
+  %add61 = fadd double %mul57, %mul60
+  %j63 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 0
+  store double %add61, double* %j63, align 8, !tbaa !19
+  %e65 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 1
+  %14 = load double, double* %e65, align 8, !tbaa !20
+  %mul66 = fmul double %14, %t
+  %e68 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 1
+  %15 = load double, double* %e68, align 8, !tbaa !20
+  %mul69 = fmul double %15, %v
+  %add70 = fadd double %mul66, %mul69
+  %e72 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 1
+  store double %add70, double* %e72, align 8, !tbaa !20
+  %k = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 2
+  %16 = load double, double* %k, align 8, !tbaa !21
+  %mul74 = fmul double %16, %t
+  %k76 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 2
+  %17 = load double, double* %k76, align 8, !tbaa !21
+  %mul77 = fmul double %17, %v
+  %add78 = fadd double %mul74, %mul77
+  %k80 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 2
+  store double %add78, double* %k80, align 8, !tbaa !21
+  %gl = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 3
+  %18 = load double, double* %gl, align 8, !tbaa !22
+  %mul82 = fmul double %18, %t
+  %gl84 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 3
+  %19 = load double, double* %gl84, align 8, !tbaa !22
+  %mul85 = fmul double %19, %v
+  %add86 = fadd double %mul82, %mul85
+  %gl88 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 3
+  store double %add86, double* %gl88, align 8, !tbaa !22
+  %bl = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 4
+  %20 = load double, double* %bl, align 8, !tbaa !23
+  %mul90 = fmul double %20, %t
+  %bl92 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 4
+  %21 = load double, double* %bl92, align 8, !tbaa !23
+  %mul93 = fmul double %21, %v
+  %add94 = fadd double %mul90, %mul93
+  %bl96 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 4
+  store double %add94, double* %bl96, align 8, !tbaa !23
+  %l = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 5
+  %22 = load double, double* %l, align 8, !tbaa !24
+  %mul98 = fmul double %22, %t
+  %l100 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 5
+  %23 = load double, double* %l100, align 8, !tbaa !24
+  %mul101 = fmul double %23, %v
+  %add102 = fadd double %mul98, %mul101
+  %l104 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 5
+  store double %add102, double* %l104, align 8, !tbaa !24
+  %blpow = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 6
+  %24 = load double, double* %blpow, align 8, !tbaa !25
+  %mul106 = fmul double %24, %t
+  %blpow108 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 6
+  %25 = load double, double* %blpow108, align 8, !tbaa !25
+  %mul109 = fmul double %25, %v
+  %add110 = fadd double %mul106, %mul109
+  %blpow112 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 6
+  store double %add110, double* %blpow112, align 8, !tbaa !25
+  %lxpow = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 7
+  %26 = load double, double* %lxpow, align 8, !tbaa !26
+  %mul114 = fmul double %26, %t
+  %lxpow116 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 7
+  %27 = load double, double* %lxpow116, align 8, !tbaa !26
+  %mul117 = fmul double %27, %v
+  %add118 = fadd double %mul114, %mul117
+  %lxpow120 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 7
+  store double %add118, double* %lxpow120, align 8, !tbaa !26
+  %lxthr = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 2, i32 8
+  %28 = load double, double* %lxthr, align 8, !tbaa !27
+  %mul122 = fmul double %28, %t
+  %lxthr124 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 2, i32 8
+  %29 = load double, double* %lxthr124, align 8, !tbaa !27
+  %mul125 = fmul double %29, %v
+  %add126 = fadd double %mul122, %mul125
+  %lxthr128 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 2, i32 8
+  store double %add126, double* %lxthr128, align 8, !tbaa !27
+  %m = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 3, i32 0
+  %30 = load double, double* %m, align 8, !tbaa !28
+  %mul129 = fmul double %30, %t
+  %m131 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 3, i32 0
+  %31 = load double, double* %m131, align 8, !tbaa !28
+  %mul132 = fmul double %31, %v
+  %add133 = fadd double %mul129, %mul132
+  %m135 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 3, i32 0
+  store double %add133, double* %m135, align 8, !tbaa !28
+  %n = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 3, i32 1
+  %32 = load double, double* %n, align 8, !tbaa !29
+  %mul137 = fmul double %32, %t
+  %n139 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 3, i32 1
+  %33 = load double, double* %n139, align 8, !tbaa !29
+  %mul140 = fmul double %33, %v
+  %add141 = fadd double %mul137, %mul140
+  %n143 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 3, i32 1
+  store double %add141, double* %n143, align 8, !tbaa !29
+  %dsm = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 3, i32 2
+  %34 = load double, double* %dsm, align 8, !tbaa !30
+  %mul145 = fmul double %34, %t
+  %dsm147 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 3, i32 2
+  %35 = load double, double* %dsm147, align 8, !tbaa !30
+  %mul148 = fmul double %35, %v
+  %add149 = fadd double %mul145, %mul148
+  %dsm151 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 3, i32 2
+  store double %add149, double* %dsm151, align 8, !tbaa !30
+  %co = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 4, i32 0
+  %36 = load double, double* %co, align 8, !tbaa !31
+  %mul152 = fmul double %36, %t
+  %co154 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 4, i32 0
+  %37 = load double, double* %co154, align 8, !tbaa !31
+  %mul155 = fmul double %37, %v
+  %add156 = fadd double %mul152, %mul155
+  %co158 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 4, i32 0
+  store double %add156, double* %co158, align 8, !tbaa !31
+  %p = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 4, i32 1
+  %38 = load double, double* %p, align 8, !tbaa !32
+  %mul160 = fmul double %38, %t
+  %p162 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 4, i32 1
+  %39 = load double, double* %p162, align 8, !tbaa !32
+  %mul163 = fmul double %39, %v
+  %add164 = fadd double %mul160, %mul163
+  %p166 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 4, i32 1
+  store double %add164, double* %p166, align 8, !tbaa !32
+  %q = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %s, i32 0, i32 5, i32 0
+  %40 = load double, double* %q, align 8, !tbaa !33
+  %mul167 = fmul double %40, %t
+  %q169 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %u, i32 0, i32 5, i32 0
+  %41 = load double, double* %q169, align 8, !tbaa !33
+  %mul170 = fmul double %41, %v
+  %add171 = fadd double %mul167, %mul170
+  %q173 = getelementptr inbounds %struct.gammapweights, %struct.gammapweights* %dst, i32 0, i32 5, i32 0
+  store double %add171, double* %q173, align 8, !tbaa !33
+  ret i32 undef
+}
+
+attributes #0 = { nofree norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc" "target-features"="+secure-plt,+spe,-altivec,-bpermd,-crypto,-direct-move,-extdiv,-htm,-power8-vector,-power9-vector,-qpx,-vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{!"FreeBSD clang version 10.0.1 (git@github.com:llvm/llvm-project.git llvmorg-10.0.1-rc1-0-gf79cd71e145)"}
+!2 = !{!3, !6, i64 0}
+!3 = !{!"", !4, i64 0, !5, i64 40, !9, i64 64, !5, i64 136, !10, i64 160, !11, i64 176}
+!4 = !{!"", !5, i64 0, !6, i64 24, !6, i64 32}
+!5 = !{!"", !6, i64 0, !6, i64 8, !6, i64 16}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C/C++ TBAA"}
+!9 = !{!"", !6, i64 0, !6, i64 8, !6, i64 16, !6, i64 24, !6, i64 32, !6, i64 40, !6, i64 48, !6, i64 56, !6, i64 64}
+!10 = !{!"", !6, i64 0, !6, i64 8}
+!11 = !{!"", !6, i64 0}
+!12 = !{!3, !6, i64 8}
+!13 = !{!3, !6, i64 16}
+!14 = !{!3, !6, i64 24}
+!15 = !{!3, !6, i64 32}
+!16 = !{!3, !6, i64 40}
+!17 = !{!3, !6, i64 48}
+!18 = !{!3, !6, i64 56}
+!19 = !{!3, !6, i64 64}
+!20 = !{!3, !6, i64 72}
+!21 = !{!3, !6, i64 80}
+!22 = !{!3, !6, i64 88}
+!23 = !{!3, !6, i64 96}
+!24 = !{!3, !6, i64 104}
+!25 = !{!3, !6, i64 112}
+!26 = !{!3, !6, i64 120}
+!27 = !{!3, !6, i64 128}
+!28 = !{!3, !6, i64 136}
+!29 = !{!3, !6, i64 144}
+!30 = !{!3, !6, i64 152}
+!31 = !{!3, !6, i64 160}
+!32 = !{!3, !6, i64 168}
+!33 = !{!3, !6, i64 176}
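
Illustrative note, not part of the patch: the added clause relies on llvm::isInt<N>(), the signed N-bit range check from llvm/Support/MathExtras.h, with N = 8 presumably because the SPE 64-bit spill/reload instructions seen above (evstdd/evldd) can encode only a much smaller displacement than the usual 16-bit offset. A minimal self-contained sketch of that range check follows; fitsSignedBits is a hypothetical stand-in for isInt<N>, and 144 is simply the frame size the test's prologue ends up with (stwu 1, -144(1)).

#include <cstdint>

// Hypothetical stand-in for llvm::isInt<N>(x): true iff x is representable
// as a signed N-bit immediate.
template <unsigned N> constexpr bool fitsSignedBits(int64_t x) {
  return x >= -(INT64_C(1) << (N - 1)) && x < (INT64_C(1) << (N - 1));
}

// 144 fits the 16-bit displacement assumed for ordinary GPR spills...
static_assert(fitsSignedBits<16>(144), "fits a 16-bit displacement");
// ...but not an 8-bit one, so the new SPE clause can request a scavenging
// spill slot for much smaller frames than the 16-bit check alone would.
static_assert(!fitsSignedBits<8>(144), "does not fit an 8-bit displacement");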