diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -2286,13 +2286,20 @@ // slot for dynamic stack allocations. // The scavenger might be invoked if the frame offset does not fit into - // the 16-bit immediate. We don't know the complete frame size here - // because we've not yet computed callee-saved register spills or the - // needed alignment padding. + // the 16-bit immediate, or the 8-bit immediate when targeting SPE. We don't + // know the complete frame size here because we've not yet computed + // callee-saved register spills or the needed alignment padding. + unsigned StackSize = determineFrameLayout(MF, true); MachineFrameInfo &MFI = MF.getFrameInfo(); + const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); if (MFI.hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) || - (hasSpills(MF) && !isInt<16>(StackSize))) { + (hasSpills(MF) && [&StackSize, &Subtarget]() -> bool { + if (Subtarget.hasSPE()) + return !isInt<8>(StackSize); + else + return !isInt<16>(StackSize); + }())) { const TargetRegisterClass &GPRC = PPC::GPRCRegClass; const TargetRegisterClass &G8RC = PPC::G8RCRegClass; const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC; diff --git a/llvm/test/CodeGen/PowerPC/register-pressure.ll b/llvm/test/CodeGen/PowerPC/register-pressure.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/register-pressure.ll @@ -0,0 +1,2008 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu \ +; RUN: -mattr=+spe | FileCheck %s -check-prefixes=CHECK,SPE + +target datalayout = "E-m:e-p:32:32-Fn32-i64:64-n32" +target triple = "ppc32" + +%struct.cmplx = type { double, double } + +; Function Attrs: nofree nounwind memory(readwrite, argmem: none) uwtable +define dso_local i32 @main() local_unnamed_addr #0 { +; CHECK-LABEL: main: +; CHECK: # %bb.0: +; CHECK-NEXT: mflr 0 +; CHECK-NEXT: stwu 1, -32(1) +; CHECK-NEXT: stw 0, 36(1) +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .cfi_offset lr, 4 +; CHECK-NEXT: lis 3, 16404 +; CHECK-NEXT: li 4, 0 +; CHECK-NEXT: lis 5, 16420 +; CHECK-NEXT: stw 3, 16(1) +; CHECK-NEXT: li 3, 160 +; CHECK-NEXT: stw 4, 20(1) +; CHECK-NEXT: stw 4, 28(1) +; CHECK-NEXT: stw 5, 24(1) +; CHECK-NEXT: bl malloc +; CHECK-NEXT: mr 4, 3 +; CHECK-NEXT: addi 3, 1, 16 +; CHECK-NEXT: mr 5, 3 +; CHECK-NEXT: bl pass11 +; CHECK-NEXT: lwz 0, 36(1) +; CHECK-NEXT: li 3, 0 +; CHECK-NEXT: addi 1, 1, 32 +; CHECK-NEXT: mtlr 0 +; CHECK-NEXT: blr + %1 = alloca %struct.cmplx, align 8 + call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %1) #5 + store double 5.000000e+00, ptr %1, align 8, !tbaa !3 + %2 = getelementptr inbounds %struct.cmplx, ptr %1, i32 0, i32 1 + store double 1.000000e+01, ptr %2, align 8, !tbaa !8 + %3 = tail call dereferenceable_or_null(160) ptr @malloc(i32 noundef 160) + call fastcc void @pass11(ptr noundef nonnull %1, ptr noundef %3, ptr noundef nonnull %1) + call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %1) #5 + ret i32 0 +} + +; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) +declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 + +; Function Attrs: mustprogress nofree nounwind willreturn allockind("alloc,uninitialized") allocsize(0) memory(inaccessiblemem: readwrite) +declare dso_local noalias noundef ptr
@malloc(i32 noundef) local_unnamed_addr #2 + +; Function Attrs: nofree noinline nosync nounwind memory(argmem: readwrite) uwtable +define internal fastcc void @pass11(ptr noalias nocapture noundef readonly %0, ptr noalias nocapture noundef writeonly %1, ptr noalias nocapture noundef readonly %2) unnamed_addr #3 { +; CHECK-LABEL: pass11: +; CHECK: # %bb.0: +; CHECK-NEXT: stwu 1, -848(1) +; CHECK-NEXT: .cfi_def_cfa_offset 848 +; CHECK-NEXT: .cfi_offset r31, -4 +; CHECK-NEXT: .cfi_offset r14, -160 +; CHECK-NEXT: .cfi_offset r15, -152 +; CHECK-NEXT: .cfi_offset r16, -144 +; CHECK-NEXT: .cfi_offset r17, -136 +; CHECK-NEXT: .cfi_offset r18, -128 +; CHECK-NEXT: .cfi_offset r19, -120 +; CHECK-NEXT: .cfi_offset r20, -112 +; CHECK-NEXT: .cfi_offset r21, -104 +; CHECK-NEXT: .cfi_offset r22, -96 +; CHECK-NEXT: .cfi_offset r23, -88 +; CHECK-NEXT: .cfi_offset r24, -80 +; CHECK-NEXT: .cfi_offset r25, -72 +; CHECK-NEXT: .cfi_offset r26, -64 +; CHECK-NEXT: .cfi_offset r27, -56 +; CHECK-NEXT: .cfi_offset r28, -48 +; CHECK-NEXT: .cfi_offset r29, -40 +; CHECK-NEXT: .cfi_offset r30, -32 +; CHECK-NEXT: li 6, 688 +; CHECK-NEXT: stw 5, 680(1) # 4-byte Folded Spill +; CHECK-NEXT: li 5, 160 +; CHECK-NEXT: stw 31, 844(1) # 4-byte Folded Spill +; CHECK-NEXT: lis 12, .LCPI1_13@ha +; CHECK-NEXT: evstddx 14, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 696 +; CHECK-NEXT: evlddx 7, 3, 5 +; CHECK-NEXT: li 5, 80 +; CHECK-NEXT: evstddx 15, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 704 +; CHECK-NEXT: evlddx 11, 3, 5 +; CHECK-NEXT: li 5, .LCPI1_5@l +; CHECK-NEXT: evstddx 16, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 712 +; CHECK-NEXT: evstddx 17, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 720 +; CHECK-NEXT: evstddx 18, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 728 +; CHECK-NEXT: evstddx 19, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 736 +; CHECK-NEXT: evstddx 20, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 744 +; CHECK-NEXT: evstddx 21, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 752 +; CHECK-NEXT: mr 21, 4 +; CHECK-NEXT: li 4, 648 +; CHECK-NEXT: evstddx 22, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 760 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 672 +; CHECK-NEXT: evstddx 23, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 768 +; CHECK-NEXT: evstddx 24, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 776 +; CHECK-NEXT: evstddx 25, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 784 +; CHECK-NEXT: evstddx 26, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 792 +; CHECK-NEXT: evstddx 27, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 800 +; CHECK-NEXT: evstddx 28, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 808 +; CHECK-NEXT: lis 28, .LCPI1_12@ha +; CHECK-NEXT: evstddx 29, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 816 +; CHECK-NEXT: evstddx 30, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 720 +; CHECK-NEXT: evlddx 8, 3, 6 +; CHECK-NEXT: li 6, 800 +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 656 +; CHECK-NEXT: evlddx 20, 3, 6 +; CHECK-NEXT: li 6, 256 +; CHECK-NEXT: evstddx 11, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 640 +; CHECK-NEXT: efdsub 10, 7, 8 +; CHECK-NEXT: lis 7, .LCPI1_5@ha +; CHECK-NEXT: lis 8, .LCPI1_6@ha +; CHECK-NEXT: evlddx 9, 7, 5 +; CHECK-NEXT: li 5, .LCPI1_6@l +; CHECK-NEXT: evstdd 9, 232(1) # 8-byte Folded Spill +; CHECK-NEXT: efdsub 7, 11, 20 +; CHECK-NEXT: li 11, .LCPI1_13@l +; CHECK-NEXT: evlddx 19, 8, 5 +; CHECK-NEXT: lis 8, .LCPI1_8@ha +; CHECK-NEXT: evlddx 12, 12, 11 +; 
CHECK-NEXT: lis 11, .LCPI1_9@ha +; CHECK-NEXT: evstdd 12, 208(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 5, 10, 9 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 5, .LCPI1_8@l +; CHECK-NEXT: li 4, 632 +; CHECK-NEXT: evlddx 15, 8, 5 +; CHECK-NEXT: efdmul 5, 7, 19 +; CHECK-NEXT: lis 8, .LCPI1_10@ha +; CHECK-NEXT: evstdd 15, 224(1) # 8-byte Folded Spill +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 624 +; CHECK-NEXT: efdmul 5, 10, 15 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 5, .LCPI1_10@l +; CHECK-NEXT: li 4, 616 +; CHECK-NEXT: evlddx 31, 8, 5 +; CHECK-NEXT: efdmul 5, 7, 9 +; CHECK-NEXT: lis 8, .LCPI1_7@ha +; CHECK-NEXT: evstdd 31, 216(1) # 8-byte Folded Spill +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 5, .LCPI1_7@l +; CHECK-NEXT: efdmul 4, 10, 31 +; CHECK-NEXT: evlddx 5, 8, 5 +; CHECK-NEXT: lis 8, .LCPI1_11@ha +; CHECK-NEXT: evstddx 4, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, .LCPI1_11@l +; CHECK-NEXT: li 4, 608 +; CHECK-NEXT: evlddx 0, 8, 6 +; CHECK-NEXT: efdmul 6, 7, 5 +; CHECK-NEXT: li 8, 640 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 600 +; CHECK-NEXT: evlddx 25, 3, 8 +; CHECK-NEXT: efdmul 6, 10, 0 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 240 +; CHECK-NEXT: li 4, 592 +; CHECK-NEXT: evlddx 24, 3, 6 +; CHECK-NEXT: efdmul 6, 7, 15 +; CHECK-NEXT: li 7, 560 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 320 +; CHECK-NEXT: evlddx 18, 3, 7 +; CHECK-NEXT: li 4, 432 +; CHECK-NEXT: efdsub 7, 24, 25 +; CHECK-NEXT: efdadd 24, 24, 25 +; CHECK-NEXT: evlddx 16, 3, 6 +; CHECK-NEXT: efdsub 10, 16, 18 +; CHECK-NEXT: efdadd 25, 16, 18 +; CHECK-NEXT: efdmul 6, 7, 5 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 512 +; CHECK-NEXT: efdmul 6, 7, 31 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 560 +; CHECK-NEXT: efdmul 6, 7, 12 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 584 +; CHECK-NEXT: efdmul 6, 7, 19 +; CHECK-NEXT: li 7, .LCPI1_9@l +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 408 +; CHECK-NEXT: evlddx 8, 11, 7 +; CHECK-NEXT: li 7, 480 +; CHECK-NEXT: efdmul 6, 10, 15 +; CHECK-NEXT: evlddx 23, 3, 7 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 504 +; CHECK-NEXT: efdmul 6, 10, 0 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 552 +; CHECK-NEXT: efdmul 6, 10, 19 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 400 +; CHECK-NEXT: li 4, 576 +; CHECK-NEXT: efdmul 7, 10, 8 +; CHECK-NEXT: li 10, .LCPI1_12@l +; CHECK-NEXT: evlddx 26, 3, 6 +; CHECK-NEXT: li 6, 728 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 168 +; CHECK-NEXT: li 4, 360 +; CHECK-NEXT: evlddx 27, 3, 6 +; CHECK-NEXT: evlddx 6, 28, 10 +; CHECK-NEXT: evlddx 17, 3, 7 +; CHECK-NEXT: efdsub 7, 26, 23 +; CHECK-NEXT: evstdd 6, 200(1) # 8-byte Folded Spill +; CHECK-NEXT: efdadd 23, 26, 23 +; CHECK-NEXT: efdsub 29, 17, 27 +; CHECK-NEXT: efdadd 27, 17, 27 +; CHECK-NEXT: efdmul 10, 7, 8 +; CHECK-NEXT: evstddx 10, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 400 +; CHECK-NEXT: efdmul 10, 7, 6 +; CHECK-NEXT: evstddx 10, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 520 +; CHECK-NEXT: efdmul 10, 7, 15 +; CHECK-NEXT: evstddx 10, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 568 +; CHECK-NEXT: li 10, 808 +; CHECK-NEXT: efdmul 7, 7, 12 
+; CHECK-NEXT: evlddx 14, 3, 10 +; CHECK-NEXT: li 10, 248 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 496 +; CHECK-NEXT: efdmul 7, 29, 9 +; CHECK-NEXT: evlddx 28, 3, 10 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 344 +; CHECK-NEXT: efdmul 7, 29, 15 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 392 +; CHECK-NEXT: efdmul 7, 29, 31 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 88 +; CHECK-NEXT: li 4, 528 +; CHECK-NEXT: evlddx 30, 3, 7 +; CHECK-NEXT: efdmul 7, 29, 0 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 648 +; CHECK-NEXT: li 4, 416 +; CHECK-NEXT: evlddx 11, 3, 7 +; CHECK-NEXT: efdsub 7, 30, 14 +; CHECK-NEXT: efdadd 30, 30, 14 +; CHECK-NEXT: efdsub 22, 28, 11 +; CHECK-NEXT: efdadd 11, 28, 11 +; CHECK-NEXT: efdmul 10, 7, 19 +; CHECK-NEXT: evstddx 10, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 320 +; CHECK-NEXT: li 10, 408 +; CHECK-NEXT: efdmul 9, 7, 9 +; CHECK-NEXT: evstddx 9, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 368 +; CHECK-NEXT: efdmul 9, 7, 5 +; CHECK-NEXT: evstddx 9, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 488 +; CHECK-NEXT: efdmul 7, 7, 15 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 456 +; CHECK-NEXT: li 7, 328 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 5, 22, 5 +; CHECK-NEXT: li 4, 424 +; CHECK-NEXT: evlddx 9, 3, 7 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 304 +; CHECK-NEXT: efdmul 5, 22, 31 +; CHECK-NEXT: evlddx 31, 3, 10 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 352 +; CHECK-NEXT: efdmul 5, 22, 12 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 568 +; CHECK-NEXT: li 4, 480 +; CHECK-NEXT: efdmul 7, 22, 19 +; CHECK-NEXT: evlddx 22, 3, 5 +; CHECK-NEXT: li 5, 488 +; CHECK-NEXT: evstddx 7, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 384 +; CHECK-NEXT: evlddx 7, 3, 5 +; CHECK-NEXT: efdsub 10, 9, 22 +; CHECK-NEXT: efdsub 5, 31, 7 +; CHECK-NEXT: efdadd 7, 31, 7 +; CHECK-NEXT: efdmul 29, 10, 15 +; CHECK-NEXT: evstddx 29, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 448 +; CHECK-NEXT: lis 29, .LCPI1_3@ha +; CHECK-NEXT: evstddx 0, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 0, 10, 0 +; CHECK-NEXT: li 4, 272 +; CHECK-NEXT: evstddx 0, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 544 +; CHECK-NEXT: efdmul 0, 10, 19 +; CHECK-NEXT: evstddx 19, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 336 +; CHECK-NEXT: lis 19, .LCPI1_0@ha +; CHECK-NEXT: efdmul 10, 10, 8 +; CHECK-NEXT: evstddx 0, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 472 +; CHECK-NEXT: evstddx 10, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 440 +; CHECK-NEXT: li 10, .LCPI1_0@l +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 8, 5, 8 +; CHECK-NEXT: li 4, 376 +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 5, 6 +; CHECK-NEXT: evstdd 4, 240(1) # 8-byte Folded Spill +; CHECK-NEXT: li 4, 328 +; CHECK-NEXT: efdmul 6, 5, 15 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 464 +; CHECK-NEXT: evlddx 6, 19, 10 +; CHECK-NEXT: lis 19, .LCPI1_1@ha +; CHECK-NEXT: efdmul 5, 5, 12 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 656 +; CHECK-NEXT: li 5, .LCPI1_1@l +; CHECK-NEXT: efdmul 14, 23, 6 +; CHECK-NEXT: evlddx 8, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 312 +; CHECK-NEXT: efdadd 15, 8, 20 +; 
CHECK-NEXT: evlddx 20, 19, 5 +; CHECK-NEXT: lis 19, .LCPI1_2@ha +; CHECK-NEXT: efdmul 5, 15, 6 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 672 +; CHECK-NEXT: li 5, .LCPI1_2@l +; CHECK-NEXT: efdmul 26, 27, 20 +; CHECK-NEXT: evlddx 8, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 648 +; CHECK-NEXT: evlddx 5, 19, 5 +; CHECK-NEXT: li 19, .LCPI1_3@l +; CHECK-NEXT: evlddx 10, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 296 +; CHECK-NEXT: evlddx 19, 29, 19 +; CHECK-NEXT: lis 29, .LCPI1_4@ha +; CHECK-NEXT: efdmul 17, 15, 20 +; CHECK-NEXT: evstdd 19, 192(1) # 8-byte Folded Spill +; CHECK-NEXT: efdadd 12, 10, 8 +; CHECK-NEXT: efdmul 8, 12, 20 +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 288 +; CHECK-NEXT: efdmul 8, 24, 5 +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 8, .LCPI1_4@l +; CHECK-NEXT: li 4, 280 +; CHECK-NEXT: efdmul 0, 11, 5 +; CHECK-NEXT: evlddx 18, 29, 8 +; CHECK-NEXT: efdmul 8, 25, 19 +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 672 +; CHECK-NEXT: efdmul 8, 23, 18 +; CHECK-NEXT: evstddx 8, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdadd 8, 9, 22 +; CHECK-NEXT: stw 3, 12(1) +; CHECK-NEXT: li 3, 264 +; CHECK-NEXT: efdmul 4, 27, 19 +; CHECK-NEXT: evstdd 4, 16(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 11, 18 +; CHECK-NEXT: evstdd 4, 24(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 8, 5 +; CHECK-NEXT: evstdd 4, 32(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 7, 6 +; CHECK-NEXT: evstdd 4, 40(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 15, 5 +; CHECK-NEXT: evstdd 4, 56(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 12, 18 +; CHECK-NEXT: evstdd 4, 48(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 24, 20 +; CHECK-NEXT: evstdd 4, 64(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 25, 6 +; CHECK-NEXT: evstdd 4, 72(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 23, 19 +; CHECK-NEXT: evstdd 4, 80(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 30, 5 +; CHECK-NEXT: evstdd 4, 96(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 27, 18 +; CHECK-NEXT: evstdd 4, 88(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 11, 20 +; CHECK-NEXT: evstdd 4, 104(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 8, 6 +; CHECK-NEXT: evstdd 4, 112(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 7, 19 +; CHECK-NEXT: evstdd 4, 120(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 15, 19 +; CHECK-NEXT: evstdd 4, 136(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 12, 5 +; CHECK-NEXT: evstdd 4, 152(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 24, 6 +; CHECK-NEXT: evstdd 4, 168(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 25, 18 +; CHECK-NEXT: evstdd 4, 184(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 23, 20 +; CHECK-NEXT: evstddx 4, 1, 3 # 8-byte Folded Spill +; CHECK-NEXT: lwz 3, 12(1) +; CHECK-NEXT: efdmul 4, 30, 19 +; CHECK-NEXT: evstdd 4, 128(1) # 8-byte Folded Spill +; CHECK-NEXT: li 4, 648 +; CHECK-NEXT: efdmul 10, 25, 5 +; CHECK-NEXT: evstddx 5, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 27, 5 +; CHECK-NEXT: li 5, 8 +; CHECK-NEXT: evstdd 4, 144(1) # 8-byte Folded Spill +; CHECK-NEXT: li 4, 664 +; CHECK-NEXT: evlddx 5, 3, 5 +; CHECK-NEXT: efdmul 29, 30, 6 +; CHECK-NEXT: evstddx 6, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 11, 6 +; CHECK-NEXT: evldd 6, 0(3) +; CHECK-NEXT: efdadd 29, 29, 5 +; CHECK-NEXT: evstdd 4, 160(1) # 8-byte Folded Spill +; CHECK-NEXT: li 4, 536 +; CHECK-NEXT: efdadd 29, 26, 29 +; CHECK-NEXT: efdmul 9, 8, 
19 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: evstddx 18, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 4, 8, 18 +; CHECK-NEXT: efdadd 9, 9, 0 +; CHECK-NEXT: evstdd 4, 176(1) # 8-byte Folded Spill +; CHECK-NEXT: li 4, 656 +; CHECK-NEXT: efdmul 31, 30, 20 +; CHECK-NEXT: efdadd 30, 5, 30 +; CHECK-NEXT: efdadd 30, 30, 27 +; CHECK-NEXT: evstddx 20, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: efdadd 11, 30, 11 +; CHECK-NEXT: efdadd 8, 11, 8 +; CHECK-NEXT: efdmul 4, 7, 20 +; CHECK-NEXT: evstdd 4, 248(1) # 8-byte Folded Spill +; CHECK-NEXT: li 4, 640 +; CHECK-NEXT: efdmul 28, 7, 18 +; CHECK-NEXT: efdadd 7, 8, 7 +; CHECK-NEXT: evlddx 26, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 632 +; CHECK-NEXT: evlddx 20, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 432 +; CHECK-NEXT: efdmul 16, 12, 19 +; CHECK-NEXT: efdadd 19, 28, 9 +; CHECK-NEXT: efdadd 28, 17, 6 +; CHECK-NEXT: efdadd 17, 31, 5 +; CHECK-NEXT: evlddx 29, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 408 +; CHECK-NEXT: stw 21, 432(1) # 4-byte Folded Spill +; CHECK-NEXT: efdadd 26, 20, 26 +; CHECK-NEXT: li 20, 800 +; CHECK-NEXT: efdmul 22, 24, 18 +; CHECK-NEXT: evlddx 0, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 360 +; CHECK-NEXT: efdadd 29, 29, 26 +; CHECK-NEXT: efdadd 28, 16, 28 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: efdadd 28, 22, 28 +; CHECK-NEXT: efdadd 28, 10, 28 +; CHECK-NEXT: efdadd 18, 9, 0 +; CHECK-NEXT: li 9, 640 +; CHECK-NEXT: efdadd 28, 14, 28 +; CHECK-NEXT: efdsub 4, 19, 18 +; CHECK-NEXT: evstddx 4, 1, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 344 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 320 +; CHECK-NEXT: evlddx 0, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 304 +; CHECK-NEXT: efdadd 26, 0, 9 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 272 +; CHECK-NEXT: efdadd 26, 9, 26 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 240(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 26, 9, 26 +; CHECK-NEXT: li 9, 632 +; CHECK-NEXT: efdadd 26, 4, 26 +; CHECK-NEXT: efdsub 4, 28, 26 +; CHECK-NEXT: efdadd 28, 28, 26 +; CHECK-NEXT: li 26, 720 +; CHECK-NEXT: evstddx 4, 1, 9 # 8-byte Folded Spill +; CHECK-NEXT: evldd 4, 16(1) # 8-byte Folded Reload +; CHECK-NEXT: evstddx 28, 21, 26 +; CHECK-NEXT: efdadd 17, 4, 17 +; CHECK-NEXT: li 4, 624 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 616 +; CHECK-NEXT: evlddx 10, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 24(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 16, 10, 9 +; CHECK-NEXT: efdadd 17, 4, 17 +; CHECK-NEXT: li 4, 512 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 32(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 16, 9, 16 +; CHECK-NEXT: efdadd 17, 4, 17 +; CHECK-NEXT: li 4, 504 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 40(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 16, 9, 16 +; CHECK-NEXT: efdadd 17, 4, 17 +; CHECK-NEXT: li 4, 400 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 56(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 16, 9, 16 +; CHECK-NEXT: efdadd 14, 4, 6 +; CHECK-NEXT: evldd 4, 48(1) # 8-byte Folded Reload +; CHECK-NEXT: efdsub 29, 17, 16 +; CHECK-NEXT: efdadd 17, 16, 17 +; CHECK-NEXT: efdadd 14, 4, 14 +; CHECK-NEXT: li 4, 392 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 368 +; CHECK-NEXT: evlddx 10, 1, 4 # 8-byte Folded Reload +; 
CHECK-NEXT: evldd 4, 64(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 31, 10, 9 +; CHECK-NEXT: efdadd 14, 4, 14 +; CHECK-NEXT: li 4, 352 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 72(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 31, 9, 31 +; CHECK-NEXT: efdadd 14, 4, 14 +; CHECK-NEXT: li 4, 336 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 80(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 31, 9, 31 +; CHECK-NEXT: efdadd 14, 4, 14 +; CHECK-NEXT: li 4, 328 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evldd 4, 96(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 31, 9, 31 +; CHECK-NEXT: efdadd 22, 4, 5 +; CHECK-NEXT: evldd 4, 88(1) # 8-byte Folded Reload +; CHECK-NEXT: efdsub 0, 14, 31 +; CHECK-NEXT: efdadd 22, 4, 22 +; CHECK-NEXT: li 4, 608 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 256 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 9, 9, 4 +; CHECK-NEXT: evldd 4, 104(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 22, 4, 22 +; CHECK-NEXT: li 4, 560 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 10, 4, 9 +; CHECK-NEXT: li 4, 312 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 9, 4, 6 +; CHECK-NEXT: li 4, 296 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 9, 4, 9 +; CHECK-NEXT: evldd 4, 112(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 27, 4, 22 +; CHECK-NEXT: li 4, 496 +; CHECK-NEXT: li 22, 416 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 22, 1, 22 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 22, 22, 4 +; CHECK-NEXT: li 4, 552 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 10, 4, 10 +; CHECK-NEXT: li 4, 288 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 9, 4, 9 +; CHECK-NEXT: evldd 4, 120(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 30, 4, 27 +; CHECK-NEXT: li 4, 424 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 27, 4, 22 +; CHECK-NEXT: li 4, 520 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 10, 4, 10 +; CHECK-NEXT: li 4, 280 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 9, 4, 9 +; CHECK-NEXT: li 4, 384 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 11, 4, 27 +; CHECK-NEXT: li 4, 672 +; CHECK-NEXT: efdsub 27, 30, 10 +; CHECK-NEXT: efdadd 10, 10, 30 +; CHECK-NEXT: efdadd 30, 14, 31 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 8, 4, 9 +; CHECK-NEXT: li 4, 376 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 9, 4, 11 +; CHECK-NEXT: evldd 4, 136(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 22, 8, 9 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: li 22, 592 +; CHECK-NEXT: efdadd 11, 4, 6 +; CHECK-NEXT: li 4, 8 +; CHECK-NEXT: evlddx 22, 1, 22 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 6, 6, 15 +; CHECK-NEXT: efdadd 6, 6, 12 +; CHECK-NEXT: efdadd 6, 6, 24 +; CHECK-NEXT: evstddx 7, 21, 4 +; CHECK-NEXT: efdadd 6, 6, 25 +; CHECK-NEXT: evldd 4, 128(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 5, 4, 5 +; CHECK-NEXT: evldd 4, 152(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 7, 4, 11 +; CHECK-NEXT: li 4, 528 +; CHECK-NEXT: li 11, 488 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 11, 1, 11 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 11, 11, 4 +; CHECK-NEXT: evldd 4, 144(1) # 
8-byte Folded Reload +; CHECK-NEXT: efdadd 5, 4, 5 +; CHECK-NEXT: evldd 4, 168(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 7, 4, 7 +; CHECK-NEXT: li 4, 600 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 22, 22, 4 +; CHECK-NEXT: li 4, 480 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 11, 4, 11 +; CHECK-NEXT: li 4, 168 +; CHECK-NEXT: evstddx 17, 21, 4 +; CHECK-NEXT: evldd 4, 160(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 5, 4, 5 +; CHECK-NEXT: evldd 4, 184(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 7, 4, 7 +; CHECK-NEXT: li 4, 584 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 22, 4, 22 +; CHECK-NEXT: li 4, 472 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 11, 4, 11 +; CHECK-NEXT: evldd 4, 176(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 5, 4, 5 +; CHECK-NEXT: li 4, 264 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 7, 4, 7 +; CHECK-NEXT: li 4, 576 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 28, 4, 22 +; CHECK-NEXT: li 4, 464 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 11, 4, 11 +; CHECK-NEXT: li 4, 248 +; CHECK-NEXT: evstddx 10, 21, 4 +; CHECK-NEXT: evldd 4, 248(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 5, 4, 5 +; CHECK-NEXT: li 4, 568 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 10, 4, 28 +; CHECK-NEXT: li 4, 640 +; CHECK-NEXT: efdsub 28, 7, 11 +; CHECK-NEXT: efdadd 7, 7, 11 +; CHECK-NEXT: evstddx 30, 21, 4 +; CHECK-NEXT: li 4, 328 +; CHECK-NEXT: efdadd 30, 10, 5 +; CHECK-NEXT: efdsub 5, 5, 10 +; CHECK-NEXT: evstddx 30, 21, 4 +; CHECK-NEXT: li 4, 560 +; CHECK-NEXT: evstddx 7, 21, 4 +; CHECK-NEXT: li 7, 640 +; CHECK-NEXT: li 4, 808 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 7, 21, 4 +; CHECK-NEXT: li 4, 632 +; CHECK-NEXT: li 7, 160 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 4, 21, 7 +; CHECK-NEXT: li 4, 728 +; CHECK-NEXT: li 7, 80 +; CHECK-NEXT: evstddx 29, 21, 4 +; CHECK-NEXT: li 4, 240 +; CHECK-NEXT: evstddx 0, 21, 4 +; CHECK-NEXT: li 4, 648 +; CHECK-NEXT: evstddx 27, 21, 4 +; CHECK-NEXT: li 4, 320 +; CHECK-NEXT: evstddx 28, 21, 4 +; CHECK-NEXT: li 4, 568 +; CHECK-NEXT: evstddx 5, 21, 4 +; CHECK-NEXT: efdadd 5, 6, 23 +; CHECK-NEXT: li 4, 88 +; CHECK-NEXT: lwz 6, 680(1) # 4-byte Folded Reload +; CHECK-NEXT: evstdd 5, 0(21) +; CHECK-NEXT: efdsub 5, 8, 9 +; CHECK-NEXT: addi 22, 6, -16 +; CHECK-NEXT: efdadd 6, 18, 19 +; CHECK-NEXT: evstddx 5, 21, 7 +; CHECK-NEXT: evstddx 6, 21, 4 +; CHECK-NEXT: li 5, 4 +; CHECK-NEXT: mtctr 5 +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: .LBB1_1: +; CHECK-NEXT: li 5, 96 +; CHECK-NEXT: li 4, 608 +; CHECK-NEXT: li 7, 616 +; CHECK-NEXT: li 8, 672 +; CHECK-NEXT: stw 3, 528(1) # 4-byte Folded Spill +; CHECK-NEXT: evlddx 31, 3, 5 +; CHECK-NEXT: li 5, 816 +; CHECK-NEXT: evlddx 6, 3, 5 +; CHECK-NEXT: li 5, 104 +; CHECK-NEXT: evlddx 28, 3, 5 +; CHECK-NEXT: li 5, 824 +; CHECK-NEXT: efdsub 30, 31, 6 +; CHECK-NEXT: efdadd 6, 31, 6 +; CHECK-NEXT: evlddx 25, 3, 5 +; CHECK-NEXT: li 5, 176 +; CHECK-NEXT: evstddx 30, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: evlddx 29, 3, 5 +; CHECK-NEXT: li 5, 736 +; CHECK-NEXT: efdsub 4, 28, 25 +; CHECK-NEXT: evlddx 26, 3, 5 +; CHECK-NEXT: li 5, 576 +; CHECK-NEXT: evstddx 4, 1, 5 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 184 +; CHECK-NEXT: li 4, 632 +; CHECK-NEXT: efdsub 0, 29, 26 +; CHECK-NEXT: evlddx 17, 3, 5 +; CHECK-NEXT: li 
5, 744 +; CHECK-NEXT: evstddx 0, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 600 +; CHECK-NEXT: evlddx 16, 3, 5 +; CHECK-NEXT: li 5, 256 +; CHECK-NEXT: evlddx 24, 3, 5 +; CHECK-NEXT: li 5, 656 +; CHECK-NEXT: efdsub 11, 17, 16 +; CHECK-NEXT: evlddx 23, 3, 5 +; CHECK-NEXT: li 5, 264 +; CHECK-NEXT: evstddx 11, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 624 +; CHECK-NEXT: evlddx 15, 3, 5 +; CHECK-NEXT: li 5, 664 +; CHECK-NEXT: efdsub 12, 24, 23 +; CHECK-NEXT: evlddx 21, 3, 5 +; CHECK-NEXT: li 5, 336 +; CHECK-NEXT: evstddx 12, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: evlddx 20, 3, 5 +; CHECK-NEXT: li 5, 576 +; CHECK-NEXT: efdsub 4, 15, 21 +; CHECK-NEXT: evlddx 19, 3, 5 +; CHECK-NEXT: li 5, 680 +; CHECK-NEXT: evstddx 4, 1, 5 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 344 +; CHECK-NEXT: efdsub 4, 20, 19 +; CHECK-NEXT: evlddx 18, 3, 5 +; CHECK-NEXT: li 5, 584 +; CHECK-NEXT: evstddx 4, 1, 7 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 416 +; CHECK-NEXT: evlddx 5, 3, 5 +; CHECK-NEXT: evlddx 14, 3, 7 +; CHECK-NEXT: li 7, 496 +; CHECK-NEXT: evlddx 27, 3, 7 +; CHECK-NEXT: efdsub 7, 18, 5 +; CHECK-NEXT: evstddx 7, 1, 8 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 424 +; CHECK-NEXT: li 8, 504 +; CHECK-NEXT: evlddx 7, 3, 7 +; CHECK-NEXT: evlddx 10, 3, 8 +; CHECK-NEXT: efdsub 8, 14, 27 +; CHECK-NEXT: li 3, 480 +; CHECK-NEXT: evstddx 8, 1, 3 # 8-byte Folded Spill +; CHECK-NEXT: li 3, 640 +; CHECK-NEXT: efdsub 9, 7, 10 +; CHECK-NEXT: evstddx 9, 1, 3 # 8-byte Folded Spill +; CHECK-NEXT: li 3, 584 +; CHECK-NEXT: evstddx 6, 1, 3 # 8-byte Folded Spill +; CHECK-NEXT: li 3, 664 +; CHECK-NEXT: evlddx 8, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 3, 6, 8 +; CHECK-NEXT: li 6, 464 +; CHECK-NEXT: evstddx 3, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: evor 3, 8, 8 +; CHECK-NEXT: li 8, 496 +; CHECK-NEXT: efdadd 6, 29, 26 +; CHECK-NEXT: li 29, 424 +; CHECK-NEXT: li 26, 520 +; CHECK-NEXT: evstddx 6, 1, 8 # 8-byte Folded Spill +; CHECK-NEXT: li 8, 656 +; CHECK-NEXT: evlddx 8, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 6, 6, 8 +; CHECK-NEXT: evstddx 6, 1, 29 # 8-byte Folded Spill +; CHECK-NEXT: evor 29, 8, 8 +; CHECK-NEXT: li 8, 488 +; CHECK-NEXT: efdadd 6, 24, 23 +; CHECK-NEXT: evldd 31, 192(1) # 8-byte Folded Reload +; CHECK-NEXT: evstddx 6, 1, 8 # 8-byte Folded Spill +; CHECK-NEXT: li 8, 648 +; CHECK-NEXT: evlddx 8, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 23, 6, 8 +; CHECK-NEXT: efdadd 6, 20, 19 +; CHECK-NEXT: evstddx 6, 1, 26 # 8-byte Folded Spill +; CHECK-NEXT: evldd 26, 224(1) # 8-byte Folded Reload +; CHECK-NEXT: efdmul 19, 6, 31 +; CHECK-NEXT: efdadd 6, 14, 27 +; CHECK-NEXT: li 27, 568 +; CHECK-NEXT: evstddx 6, 1, 27 # 8-byte Folded Spill +; CHECK-NEXT: li 27, 536 +; CHECK-NEXT: evlddx 24, 1, 27 # 8-byte Folded Reload +; CHECK-NEXT: li 27, 464 +; CHECK-NEXT: evlddx 27, 1, 27 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 14, 6, 24 +; CHECK-NEXT: efdadd 6, 28, 25 +; CHECK-NEXT: li 28, 560 +; CHECK-NEXT: evstddx 6, 1, 28 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 28, 6, 3 +; CHECK-NEXT: li 6, 504 +; CHECK-NEXT: efdadd 3, 17, 16 +; CHECK-NEXT: evstddx 3, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: efdadd 6, 15, 21 +; CHECK-NEXT: lwz 21, 528(1) # 4-byte Folded Reload +; CHECK-NEXT: efdmul 16, 3, 29 +; CHECK-NEXT: li 3, 592 +; CHECK-NEXT: evstddx 6, 1, 3 # 8-byte Folded Spill +; CHECK-NEXT: efdadd 3, 18, 5 +; CHECK-NEXT: li 5, 512 +; CHECK-NEXT: efdmul 20, 3, 31 +; CHECK-NEXT: evstddx 3, 1, 5 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 552 +; CHECK-NEXT: efdadd 3, 7, 10 +; CHECK-NEXT: evldd 10, 
232(1) # 8-byte Folded Reload +; CHECK-NEXT: evstddx 3, 1, 5 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 25, 3, 24 +; CHECK-NEXT: li 3, 544 +; CHECK-NEXT: evlddx 5, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 456 +; CHECK-NEXT: efdmul 15, 6, 8 +; CHECK-NEXT: evlddx 6, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 576 +; CHECK-NEXT: efdmul 29, 0, 10 +; CHECK-NEXT: evlddx 3, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 0, 30, 5 +; CHECK-NEXT: efdmul 7, 3, 5 +; CHECK-NEXT: li 3, 680 +; CHECK-NEXT: efdmul 30, 12, 6 +; CHECK-NEXT: efdmul 12, 4, 26 +; CHECK-NEXT: evlddx 4, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 672 +; CHECK-NEXT: evlddx 5, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 440 +; CHECK-NEXT: efdmul 4, 4, 6 +; CHECK-NEXT: evlddx 17, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 8, 11, 10 +; CHECK-NEXT: lwz 11, 432(1) # 4-byte Folded Reload +; CHECK-NEXT: efdmul 6, 9, 17 +; CHECK-NEXT: li 9, 16 +; CHECK-NEXT: efdadd 7, 7, 8 +; CHECK-NEXT: evlddx 3, 21, 9 +; CHECK-NEXT: li 9, 472 +; CHECK-NEXT: efdadd 4, 4, 7 +; CHECK-NEXT: efdadd 7, 0, 29 +; CHECK-NEXT: efdmul 5, 5, 26 +; CHECK-NEXT: efdadd 7, 30, 7 +; CHECK-NEXT: li 30, 400 +; CHECK-NEXT: efdadd 7, 12, 7 +; CHECK-NEXT: evstddx 3, 1, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 480 +; CHECK-NEXT: efdadd 3, 27, 3 +; CHECK-NEXT: evlddx 18, 1, 9 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 4, 5, 4 +; CHECK-NEXT: efdadd 4, 6, 4 +; CHECK-NEXT: efdmul 9, 18, 17 +; CHECK-NEXT: li 17, 24 +; CHECK-NEXT: evlddx 27, 21, 17 +; CHECK-NEXT: li 17, 424 +; CHECK-NEXT: li 21, 464 +; CHECK-NEXT: evlddx 17, 1, 17 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 27, 1, 21 # 8-byte Folded Spill +; CHECK-NEXT: li 21, 24 +; CHECK-NEXT: efdadd 6, 28, 27 +; CHECK-NEXT: efdadd 7, 9, 7 +; CHECK-NEXT: efdadd 6, 16, 6 +; CHECK-NEXT: efdadd 6, 15, 6 +; CHECK-NEXT: efdadd 6, 20, 6 +; CHECK-NEXT: evlddx 8, 22, 21 +; CHECK-NEXT: efdadd 6, 25, 6 +; CHECK-NEXT: efdadd 3, 17, 3 +; CHECK-NEXT: li 25, 600 +; CHECK-NEXT: efdsub 9, 6, 7 +; CHECK-NEXT: efdadd 6, 7, 6 +; CHECK-NEXT: li 7, 16 +; CHECK-NEXT: efdadd 3, 23, 3 +; CHECK-NEXT: li 23, 576 +; CHECK-NEXT: efdadd 3, 19, 3 +; CHECK-NEXT: efdadd 3, 14, 3 +; CHECK-NEXT: evldd 14, 216(1) # 8-byte Folded Reload +; CHECK-NEXT: evlddx 7, 22, 7 +; CHECK-NEXT: evlddx 23, 1, 23 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 5, 3, 4 +; CHECK-NEXT: efdadd 3, 3, 4 +; CHECK-NEXT: li 4, 600 +; CHECK-NEXT: evlddx 25, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 4, 22, 4 +; CHECK-NEXT: efdmul 12, 6, 8 +; CHECK-NEXT: evstddx 12, 1, 30 # 8-byte Folded Spill +; CHECK-NEXT: li 12, 424 +; CHECK-NEXT: li 30, 352 +; CHECK-NEXT: efdmul 6, 7, 6 +; CHECK-NEXT: evstddx 6, 1, 12 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 6, 7, 5 +; CHECK-NEXT: li 7, 384 +; CHECK-NEXT: evstddx 6, 1, 7 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 416 +; CHECK-NEXT: li 7, 376 +; CHECK-NEXT: efdmul 5, 5, 8 +; CHECK-NEXT: evstddx 5, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 592 +; CHECK-NEXT: efdmul 23, 23, 10 +; CHECK-NEXT: evlddx 5, 22, 5 +; CHECK-NEXT: efdmul 25, 25, 26 +; CHECK-NEXT: efdmul 6, 5, 3 +; CHECK-NEXT: evstddx 6, 1, 7 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 568 +; CHECK-NEXT: li 6, 408 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: evlddx 12, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 664 +; CHECK-NEXT: evstddx 3, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 520 +; CHECK-NEXT: efdmul 3, 9, 4 +; CHECK-NEXT: li 4, 368 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 27, 1, 6 # 8-byte 
Folded Reload +; CHECK-NEXT: li 6, 648 +; CHECK-NEXT: evstddx 3, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 392 +; CHECK-NEXT: efdmul 3, 5, 9 +; CHECK-NEXT: evlddx 9, 1, 6 # 8-byte Folded Reload +; CHECK-NEXT: li 5, 488 +; CHECK-NEXT: evstddx 3, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 3, 656 +; CHECK-NEXT: evlddx 28, 1, 5 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 496 +; CHECK-NEXT: efdmul 0, 12, 7 +; CHECK-NEXT: evlddx 8, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 584 +; CHECK-NEXT: evlddx 29, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 0, 1, 30 # 8-byte Folded Spill +; CHECK-NEXT: li 30, 560 +; CHECK-NEXT: evlddx 3, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 6, 27, 9 +; CHECK-NEXT: evlddx 0, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: li 30, 360 +; CHECK-NEXT: efdmul 3, 3, 8 +; CHECK-NEXT: efdmul 8, 0, 8 +; CHECK-NEXT: evstddx 8, 1, 30 # 8-byte Folded Spill +; CHECK-NEXT: li 8, 504 +; CHECK-NEXT: li 30, 344 +; CHECK-NEXT: efdmul 5, 28, 24 +; CHECK-NEXT: evlddx 21, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 4, 29, 31 +; CHECK-NEXT: efdmul 8, 21, 31 +; CHECK-NEXT: evstddx 8, 1, 30 # 8-byte Folded Spill +; CHECK-NEXT: li 8, 592 +; CHECK-NEXT: li 30, 336 +; CHECK-NEXT: evlddx 8, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 8, 8, 24 +; CHECK-NEXT: evstddx 8, 1, 30 # 8-byte Folded Spill +; CHECK-NEXT: li 8, 512 +; CHECK-NEXT: evlddx 20, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 8, 20, 9 +; CHECK-NEXT: li 9, 328 +; CHECK-NEXT: evstddx 8, 1, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 8, 552 +; CHECK-NEXT: li 9, 448 +; CHECK-NEXT: evlddx 30, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: li 8, 320 +; CHECK-NEXT: evlddx 17, 1, 9 # 8-byte Folded Reload +; CHECK-NEXT: li 9, 616 +; CHECK-NEXT: evlddx 9, 1, 9 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 7, 30, 7 +; CHECK-NEXT: evstddx 7, 1, 8 # 8-byte Folded Spill +; CHECK-NEXT: li 7, 632 +; CHECK-NEXT: li 8, 624 +; CHECK-NEXT: efdmul 9, 9, 17 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 8, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 24, 7, 26 +; CHECK-NEXT: li 7, 608 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 8, 8, 14 +; CHECK-NEXT: efdmul 7, 7, 10 +; CHECK-NEXT: li 10, 680 +; CHECK-NEXT: evlddx 10, 1, 10 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 7, 7, 24 +; CHECK-NEXT: evldd 24, 208(1) # 8-byte Folded Reload +; CHECK-NEXT: efdmul 19, 10, 14 +; CHECK-NEXT: li 10, 672 +; CHECK-NEXT: efdadd 7, 8, 7 +; CHECK-NEXT: li 8, 328 +; CHECK-NEXT: efdadd 7, 9, 7 +; CHECK-NEXT: evlddx 10, 1, 10 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 8, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 16, 10, 17 +; CHECK-NEXT: li 17, 640 +; CHECK-NEXT: evldd 10, 200(1) # 8-byte Folded Reload +; CHECK-NEXT: evlddx 17, 1, 17 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 15, 17, 10 +; CHECK-NEXT: efdmul 10, 18, 10 +; CHECK-NEXT: li 18, 472 +; CHECK-NEXT: evlddx 18, 1, 18 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 7, 10, 7 +; CHECK-NEXT: efdadd 3, 3, 18 +; CHECK-NEXT: efdadd 3, 4, 3 +; CHECK-NEXT: efdadd 4, 23, 25 +; CHECK-NEXT: li 25, 344 +; CHECK-NEXT: efdadd 3, 5, 3 +; CHECK-NEXT: li 5, 352 +; CHECK-NEXT: efdadd 4, 19, 4 +; CHECK-NEXT: efdadd 3, 6, 3 +; CHECK-NEXT: li 6, 464 +; CHECK-NEXT: efdadd 4, 16, 4 +; CHECK-NEXT: efdadd 4, 15, 4 +; CHECK-NEXT: evlddx 25, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 5, 1, 5 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 17, 1, 6 # 8-byte Folded Reload +; CHECK-NEXT: li 6, 360 +; CHECK-NEXT: evlddx 6, 1, 6 # 8-byte 
Folded Reload +; CHECK-NEXT: efdadd 3, 5, 3 +; CHECK-NEXT: efdsub 5, 3, 4 +; CHECK-NEXT: efdadd 3, 3, 4 +; CHECK-NEXT: li 4, 536 +; CHECK-NEXT: efdadd 6, 6, 17 +; CHECK-NEXT: evlddx 4, 22, 4 +; CHECK-NEXT: efdadd 6, 25, 6 +; CHECK-NEXT: li 25, 336 +; CHECK-NEXT: evlddx 25, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 6, 25, 6 +; CHECK-NEXT: li 25, 328 +; CHECK-NEXT: efdadd 6, 8, 6 +; CHECK-NEXT: li 8, 320 +; CHECK-NEXT: evlddx 8, 1, 8 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 6, 8, 6 +; CHECK-NEXT: li 8, 88 +; CHECK-NEXT: efdsub 9, 6, 7 +; CHECK-NEXT: efdadd 6, 7, 6 +; CHECK-NEXT: li 7, 80 +; CHECK-NEXT: evlddx 8, 22, 8 +; CHECK-NEXT: evlddx 7, 22, 7 +; CHECK-NEXT: efdmul 10, 6, 8 +; CHECK-NEXT: evstddx 10, 1, 25 # 8-byte Folded Spill +; CHECK-NEXT: li 10, 360 +; CHECK-NEXT: efdmul 6, 7, 6 +; CHECK-NEXT: evstddx 6, 1, 10 # 8-byte Folded Spill +; CHECK-NEXT: li 10, 592 +; CHECK-NEXT: efdmul 6, 7, 5 +; CHECK-NEXT: li 7, 320 +; CHECK-NEXT: evlddx 10, 1, 10 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 6, 1, 7 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 352 +; CHECK-NEXT: li 7, 312 +; CHECK-NEXT: efdmul 5, 5, 8 +; CHECK-NEXT: evstddx 5, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 528 +; CHECK-NEXT: evlddx 5, 22, 5 +; CHECK-NEXT: efdmul 6, 5, 3 +; CHECK-NEXT: evstddx 6, 1, 7 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 344 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: evstddx 3, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 664 +; CHECK-NEXT: efdmul 3, 9, 4 +; CHECK-NEXT: li 4, 304 +; CHECK-NEXT: evstddx 3, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 336 +; CHECK-NEXT: efdmul 3, 5, 9 +; CHECK-NEXT: li 5, 656 +; CHECK-NEXT: evstddx 3, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 4, 536 +; CHECK-NEXT: li 3, 648 +; CHECK-NEXT: efdmul 7, 12, 31 +; CHECK-NEXT: evlddx 9, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 8, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 584 +; CHECK-NEXT: efdmul 12, 30, 31 +; CHECK-NEXT: li 30, 632 +; CHECK-NEXT: evlddx 3, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 4, 29, 9 +; CHECK-NEXT: evlddx 29, 1, 5 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 5, 28, 29 +; CHECK-NEXT: evlddx 28, 1, 6 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 6, 27, 28 +; CHECK-NEXT: li 27, 544 +; CHECK-NEXT: evlddx 16, 1, 27 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 9, 21, 9 +; CHECK-NEXT: efdmul 21, 20, 28 +; CHECK-NEXT: li 28, 616 +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 10, 10, 29 +; CHECK-NEXT: li 29, 456 +; CHECK-NEXT: evlddx 23, 1, 29 # 8-byte Folded Reload +; CHECK-NEXT: li 29, 624 +; CHECK-NEXT: efdmul 27, 28, 16 +; CHECK-NEXT: li 28, 600 +; CHECK-NEXT: evlddx 29, 1, 29 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 3, 3, 8 +; CHECK-NEXT: efdmul 25, 28, 14 +; CHECK-NEXT: li 28, 576 +; CHECK-NEXT: efdadd 3, 3, 18 +; CHECK-NEXT: evlddx 20, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: li 28, 680 +; CHECK-NEXT: efdadd 3, 4, 3 +; CHECK-NEXT: efdmul 8, 0, 8 +; CHECK-NEXT: evlddx 0, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: li 30, 608 +; CHECK-NEXT: efdadd 3, 5, 3 +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 3, 6, 3 +; CHECK-NEXT: efdadd 3, 7, 3 +; CHECK-NEXT: evlddx 30, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 0, 0, 14 +; CHECK-NEXT: efdadd 6, 8, 17 +; CHECK-NEXT: li 8, 152 +; CHECK-NEXT: efdadd 6, 9, 6 +; CHECK-NEXT: efdadd 6, 10, 6 +; CHECK-NEXT: efdmul 19, 28, 24 +; CHECK-NEXT: li 28, 672 +; CHECK-NEXT: efdadd 6, 21, 6 +; 
CHECK-NEXT: evlddx 8, 22, 8 +; CHECK-NEXT: efdadd 6, 12, 6 +; CHECK-NEXT: li 12, 264 +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 30, 30, 23 +; CHECK-NEXT: efdmul 16, 28, 16 +; CHECK-NEXT: li 28, 640 +; CHECK-NEXT: efdadd 7, 30, 0 +; CHECK-NEXT: evlddx 15, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: li 28, 480 +; CHECK-NEXT: efdmul 29, 29, 24 +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 23, 20, 23 +; CHECK-NEXT: efdadd 7, 29, 7 +; CHECK-NEXT: efdadd 7, 27, 7 +; CHECK-NEXT: efdmul 14, 28, 26 +; CHECK-NEXT: efdadd 4, 23, 25 +; CHECK-NEXT: li 25, 656 +; CHECK-NEXT: efdadd 4, 19, 4 +; CHECK-NEXT: efdadd 4, 16, 4 +; CHECK-NEXT: efdmul 15, 15, 26 +; CHECK-NEXT: efdadd 7, 14, 7 +; CHECK-NEXT: evlddx 14, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 9, 6, 7 +; CHECK-NEXT: efdadd 6, 7, 6 +; CHECK-NEXT: li 7, 144 +; CHECK-NEXT: efdadd 4, 15, 4 +; CHECK-NEXT: evlddx 7, 22, 7 +; CHECK-NEXT: efdsub 5, 3, 4 +; CHECK-NEXT: efdadd 3, 3, 4 +; CHECK-NEXT: li 4, 472 +; CHECK-NEXT: evlddx 4, 22, 4 +; CHECK-NEXT: efdmul 10, 6, 8 +; CHECK-NEXT: evstddx 10, 1, 12 # 8-byte Folded Spill +; CHECK-NEXT: li 10, 296 +; CHECK-NEXT: efdmul 6, 7, 6 +; CHECK-NEXT: evstddx 6, 1, 10 # 8-byte Folded Spill +; CHECK-NEXT: efdmul 6, 7, 5 +; CHECK-NEXT: li 7, 256 +; CHECK-NEXT: evstdd 6, 248(1) # 8-byte Folded Spill +; CHECK-NEXT: li 6, 288 +; CHECK-NEXT: efdmul 5, 5, 8 +; CHECK-NEXT: evstddx 5, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 464 +; CHECK-NEXT: evlddx 5, 22, 5 +; CHECK-NEXT: efdmul 6, 5, 3 +; CHECK-NEXT: evstddx 6, 1, 7 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 280 +; CHECK-NEXT: li 7, 600 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 3, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 6, 544 +; CHECK-NEXT: efdmul 3, 9, 4 +; CHECK-NEXT: li 4, 272 +; CHECK-NEXT: evlddx 10, 1, 6 # 8-byte Folded Reload +; CHECK-NEXT: li 6, 632 +; CHECK-NEXT: evstdd 3, 240(1) # 8-byte Folded Spill +; CHECK-NEXT: efdmul 3, 5, 9 +; CHECK-NEXT: li 5, 624 +; CHECK-NEXT: evstddx 3, 1, 4 # 8-byte Folded Spill +; CHECK-NEXT: li 3, 632 +; CHECK-NEXT: li 4, 448 +; CHECK-NEXT: evlddx 5, 1, 5 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 9, 20, 26 +; CHECK-NEXT: evlddx 3, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 8, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: li 4, 608 +; CHECK-NEXT: efdmul 5, 5, 10 +; CHECK-NEXT: evlddx 4, 1, 4 # 8-byte Folded Reload +; CHECK-NEXT: evstddx 5, 1, 6 # 8-byte Folded Spill +; CHECK-NEXT: li 5, 616 +; CHECK-NEXT: li 6, 440 +; CHECK-NEXT: efdmul 3, 3, 8 +; CHECK-NEXT: evlddx 5, 1, 5 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 12, 1, 6 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 8, 7, 8 +; CHECK-NEXT: li 7, 680 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 6, 5, 12 +; CHECK-NEXT: efdadd 8, 9, 8 +; CHECK-NEXT: efdmul 10, 7, 10 +; CHECK-NEXT: li 7, 672 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 5, 28, 24 +; CHECK-NEXT: efdadd 8, 10, 8 +; CHECK-NEXT: efdmul 12, 7, 12 +; CHECK-NEXT: li 7, 640 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 4, 4, 26 +; CHECK-NEXT: efdadd 8, 12, 8 +; CHECK-NEXT: li 12, 464 +; CHECK-NEXT: efdmul 0, 7, 24 +; CHECK-NEXT: li 7, 584 +; CHECK-NEXT: efdadd 3, 4, 3 +; CHECK-NEXT: evlddx 24, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 496 +; CHECK-NEXT: evlddx 21, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 648 +; CHECK-NEXT: efdadd 8, 0, 8 +; CHECK-NEXT: efdmul 30, 24, 
31 +; CHECK-NEXT: evlddx 19, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 488 +; CHECK-NEXT: evlddx 18, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 664 +; CHECK-NEXT: efdmul 29, 21, 19 +; CHECK-NEXT: evlddx 27, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 520 +; CHECK-NEXT: evlddx 17, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 536 +; CHECK-NEXT: efdmul 28, 18, 27 +; CHECK-NEXT: evlddx 15, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 568 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 26, 17, 15 +; CHECK-NEXT: efdmul 25, 7, 14 +; CHECK-NEXT: li 7, 560 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 23, 7, 31 +; CHECK-NEXT: li 7, 504 +; CHECK-NEXT: evlddx 20, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 592 +; CHECK-NEXT: evlddx 31, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: li 7, 512 +; CHECK-NEXT: efdmul 19, 20, 19 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 16, 31, 27 +; CHECK-NEXT: li 27, 552 +; CHECK-NEXT: evlddx 27, 1, 27 # 8-byte Folded Reload +; CHECK-NEXT: efdmul 15, 7, 15 +; CHECK-NEXT: efdmul 14, 27, 14 +; CHECK-NEXT: li 27, 472 +; CHECK-NEXT: evlddx 27, 1, 27 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 30, 30, 27 +; CHECK-NEXT: efdadd 27, 27, 24 +; CHECK-NEXT: li 24, 104 +; CHECK-NEXT: efdadd 30, 29, 30 +; CHECK-NEXT: li 29, 320 +; CHECK-NEXT: efdadd 27, 27, 21 +; CHECK-NEXT: efdadd 9, 28, 30 +; CHECK-NEXT: li 30, 384 +; CHECK-NEXT: li 28, 304 +; CHECK-NEXT: efdadd 9, 26, 9 +; CHECK-NEXT: evlddx 26, 1, 12 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 27, 27, 18 +; CHECK-NEXT: efdadd 9, 25, 9 +; CHECK-NEXT: evldd 25, 248(1) # 8-byte Folded Reload +; CHECK-NEXT: efdadd 27, 27, 17 +; CHECK-NEXT: efdsub 10, 9, 8 +; CHECK-NEXT: efdadd 8, 9, 8 +; CHECK-NEXT: li 9, 408 +; CHECK-NEXT: evlddx 0, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: li 30, 376 +; CHECK-NEXT: evlddx 29, 1, 29 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 12, 23, 26 +; CHECK-NEXT: evlddx 9, 22, 9 +; CHECK-NEXT: li 23, 744 +; CHECK-NEXT: efdadd 12, 19, 12 +; CHECK-NEXT: efdadd 4, 16, 12 +; CHECK-NEXT: li 12, 632 +; CHECK-NEXT: efdadd 4, 15, 4 +; CHECK-NEXT: efdadd 4, 14, 4 +; CHECK-NEXT: evlddx 12, 1, 12 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 3, 12, 3 +; CHECK-NEXT: efdadd 3, 6, 3 +; CHECK-NEXT: efdadd 3, 5, 3 +; CHECK-NEXT: li 5, 216 +; CHECK-NEXT: efdsub 6, 4, 3 +; CHECK-NEXT: efdadd 3, 3, 4 +; CHECK-NEXT: li 4, 208 +; CHECK-NEXT: evlddx 5, 22, 5 +; CHECK-NEXT: evlddx 4, 22, 4 +; CHECK-NEXT: efdmul 19, 3, 5 +; CHECK-NEXT: efdmul 3, 4, 3 +; CHECK-NEXT: efdmul 4, 4, 10 +; CHECK-NEXT: efdmul 5, 10, 5 +; CHECK-NEXT: li 10, 400 +; CHECK-NEXT: evlddx 10, 22, 10 +; CHECK-NEXT: addi 22, 22, 16 +; CHECK-NEXT: efdsub 4, 4, 19 +; CHECK-NEXT: efdadd 3, 3, 5 +; CHECK-NEXT: efdmul 12, 10, 8 +; CHECK-NEXT: efdmul 8, 8, 9 +; CHECK-NEXT: efdmul 9, 6, 9 +; CHECK-NEXT: efdmul 6, 10, 6 +; CHECK-NEXT: li 10, 400 +; CHECK-NEXT: evlddx 10, 1, 10 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 10, 0, 10 +; CHECK-NEXT: evlddx 0, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: li 30, 368 +; CHECK-NEXT: evlddx 30, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 0, 0, 30 +; CHECK-NEXT: li 30, 328 +; CHECK-NEXT: evlddx 30, 1, 30 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 30, 29, 30 +; CHECK-NEXT: li 29, 312 +; CHECK-NEXT: evlddx 29, 1, 29 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 29, 29, 28 +; CHECK-NEXT: li 28, 264 +; CHECK-NEXT: evlddx 28, 1, 28 # 8-byte Folded Reload +; 
CHECK-NEXT: efdsub 28, 25, 28 +; CHECK-NEXT: li 25, 560 +; CHECK-NEXT: evlddx 25, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 26, 26, 25 +; CHECK-NEXT: li 25, 392 +; CHECK-NEXT: efdadd 26, 26, 20 +; CHECK-NEXT: efdadd 26, 26, 31 +; CHECK-NEXT: efdadd 26, 26, 7 +; CHECK-NEXT: li 7, 568 +; CHECK-NEXT: evlddx 25, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 27, 27, 7 +; CHECK-NEXT: li 7, 552 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 26, 26, 7 +; CHECK-NEXT: li 7, 16 +; CHECK-NEXT: evstddx 27, 11, 7 +; CHECK-NEXT: li 7, 256 +; CHECK-NEXT: evldd 27, 240(1) # 8-byte Folded Reload +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdsub 27, 7, 27 +; CHECK-NEXT: li 7, 24 +; CHECK-NEXT: evstddx 26, 11, 7 +; CHECK-NEXT: li 7, 424 +; CHECK-NEXT: li 26, 416 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 26, 1, 26 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 26, 7, 26 +; CHECK-NEXT: li 7, 408 +; CHECK-NEXT: evstddx 26, 11, 24 +; CHECK-NEXT: li 26, 352 +; CHECK-NEXT: li 24, 824 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 26, 1, 26 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 25, 25, 7 +; CHECK-NEXT: li 7, 360 +; CHECK-NEXT: evstddx 25, 11, 24 +; CHECK-NEXT: li 25, 336 +; CHECK-NEXT: li 24, 184 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 25, 1, 25 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 26, 7, 26 +; CHECK-NEXT: li 7, 344 +; CHECK-NEXT: evstddx 26, 11, 24 +; CHECK-NEXT: li 26, 288 +; CHECK-NEXT: li 24, 272 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 26, 1, 26 # 8-byte Folded Reload +; CHECK-NEXT: evlddx 24, 1, 24 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 25, 25, 7 +; CHECK-NEXT: li 7, 296 +; CHECK-NEXT: evstddx 25, 11, 23 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 26, 7, 26 +; CHECK-NEXT: li 7, 280 +; CHECK-NEXT: evlddx 7, 1, 7 # 8-byte Folded Reload +; CHECK-NEXT: efdadd 24, 24, 7 +; CHECK-NEXT: li 7, 264 +; CHECK-NEXT: evstddx 26, 11, 7 +; CHECK-NEXT: li 7, 664 +; CHECK-NEXT: evstddx 24, 11, 7 +; CHECK-NEXT: li 7, 96 +; CHECK-NEXT: evstddx 10, 11, 7 +; CHECK-NEXT: li 7, 816 +; CHECK-NEXT: lwz 10, 528(1) # 4-byte Folded Reload +; CHECK-NEXT: evstddx 0, 11, 7 +; CHECK-NEXT: li 7, 176 +; CHECK-NEXT: addi 10, 10, 16 +; CHECK-NEXT: evstddx 30, 11, 7 +; CHECK-NEXT: li 7, 736 +; CHECK-NEXT: evstddx 29, 11, 7 +; CHECK-NEXT: efdsub 7, 12, 9 +; CHECK-NEXT: li 12, 336 +; CHECK-NEXT: li 9, 256 +; CHECK-NEXT: evstddx 4, 11, 12 +; CHECK-NEXT: li 4, 344 +; CHECK-NEXT: evstddx 28, 11, 9 +; CHECK-NEXT: li 9, 656 +; CHECK-NEXT: evstddx 3, 11, 4 +; CHECK-NEXT: li 3, 576 +; CHECK-NEXT: li 4, 584 +; CHECK-NEXT: evstddx 27, 11, 9 +; CHECK-NEXT: addi 9, 11, 16 +; CHECK-NEXT: evstddx 7, 11, 3 +; CHECK-NEXT: efdadd 3, 6, 8 +; CHECK-NEXT: evstddx 3, 11, 4 +; CHECK-NEXT: mr 3, 10 +; CHECK-NEXT: stw 9, 432(1) # 4-byte Folded Spill +; CHECK-NEXT: bdnz .LBB1_1 +; CHECK-NEXT: # %bb.2: +; CHECK-NEXT: li 3, 816 +; CHECK-NEXT: lwz 31, 844(1) # 4-byte Folded Reload +; CHECK-NEXT: evlddx 30, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 808 +; CHECK-NEXT: evlddx 29, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 800 +; CHECK-NEXT: evlddx 28, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 792 +; CHECK-NEXT: evlddx 27, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 784 +; CHECK-NEXT: evlddx 26, 1, 3 # 8-byte Folded Reload +; CHECK-NEXT: li 3, 776 +; 
CHECK-NEXT: evlddx 25, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 768
+; CHECK-NEXT: evlddx 24, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 760
+; CHECK-NEXT: evlddx 23, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 752
+; CHECK-NEXT: evlddx 22, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 744
+; CHECK-NEXT: evlddx 21, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 736
+; CHECK-NEXT: evlddx 20, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 728
+; CHECK-NEXT: evlddx 19, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 720
+; CHECK-NEXT: evlddx 18, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 712
+; CHECK-NEXT: evlddx 17, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 704
+; CHECK-NEXT: evlddx 16, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 696
+; CHECK-NEXT: evlddx 15, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 688
+; CHECK-NEXT: evlddx 14, 1, 3 # 8-byte Folded Reload
+; CHECK-NEXT: addi 1, 1, 848
+; CHECK-NEXT: blr
+ %4 = load double, ptr %0, align 8, !tbaa.struct !9
+ %5 = getelementptr inbounds i8, ptr %0, i32 8
+ %6 = load double, ptr %5, align 8, !tbaa.struct !11
+ %7 = getelementptr inbounds %struct.cmplx, ptr %0, i32 5
+ %8 = load double, ptr %7, align 8, !tbaa !3
+ %9 = getelementptr inbounds %struct.cmplx, ptr %0, i32 50
+ %10 = load double, ptr %9, align 8, !tbaa !3
+ %11 = fadd double %8, %10
+ %12 = getelementptr inbounds %struct.cmplx, ptr %0, i32 5, i32 1
+ %13 = load double, ptr %12, align 8, !tbaa !8
+ %14 = getelementptr inbounds %struct.cmplx, ptr %0, i32 50, i32 1
+ %15 = load double, ptr %14, align 8, !tbaa !8
+ %16 = fadd double %13, %15
+ %17 = fsub double %8, %10
+ %18 = fsub double %13, %15
+ %19 = getelementptr inbounds %struct.cmplx, ptr %0, i32 10
+ %20 = load double, ptr %19, align 8, !tbaa !3
+ %21 = getelementptr inbounds %struct.cmplx, ptr %0, i32 45
+ %22 = load double, ptr %21, align 8, !tbaa !3
+ %23 = fadd double %20, %22
+ %24 = getelementptr inbounds %struct.cmplx, ptr %0, i32 10, i32 1
+ %25 = load double, ptr %24, align 8, !tbaa !8
+ %26 = getelementptr inbounds %struct.cmplx, ptr %0, i32 45, i32 1
+ %27 = load double, ptr %26, align 8, !tbaa !8
+ %28 = fadd double %25, %27
+ %29 = fsub double %20, %22
+ %30 = fsub double %25, %27
+ %31 = getelementptr inbounds %struct.cmplx, ptr %0, i32 15
+ %32 = load double, ptr %31, align 8, !tbaa !3
+ %33 = getelementptr inbounds %struct.cmplx, ptr %0, i32 40
+ %34 = load double, ptr %33, align 8, !tbaa !3
+ %35 = fadd double %32, %34
+ %36 = getelementptr inbounds %struct.cmplx, ptr %0, i32 15, i32 1
+ %37 = load double, ptr %36, align 8, !tbaa !8
+ %38 = getelementptr inbounds %struct.cmplx, ptr %0, i32 40, i32 1
+ %39 = load double, ptr %38, align 8, !tbaa !8
+ %40 = fadd double %37, %39
+ %41 = fsub double %32, %34
+ %42 = fsub double %37, %39
+ %43 = getelementptr inbounds %struct.cmplx, ptr %0, i32 20
+ %44 = load double, ptr %43, align 8, !tbaa !3
+ %45 = getelementptr inbounds %struct.cmplx, ptr %0, i32 35
+ %46 = load double, ptr %45, align 8, !tbaa !3
+ %47 = fadd double %44, %46
+ %48 = getelementptr inbounds %struct.cmplx, ptr %0, i32 20, i32 1
+ %49 = load double, ptr %48, align 8, !tbaa !8
+ %50 = getelementptr inbounds %struct.cmplx, ptr %0, i32 35, i32 1
+ %51 = load double, ptr %50, align 8, !tbaa !8
+ %52 = fadd double %49, %51
+ %53 = fsub double %44, %46
+ %54 = fsub double %49, %51
+ %55 = getelementptr inbounds %struct.cmplx, ptr %0, i32 25
+ %56 = load double, ptr %55, align 8, !tbaa !3
+ %57 = getelementptr inbounds %struct.cmplx, ptr %0, i32 30
+ %58 = load double, ptr %57, align 8, !tbaa !3
+ %59 = fadd double %56, %58
+ %60 = getelementptr inbounds %struct.cmplx, ptr %0, i32 25, i32 1
+ %61 = load double, ptr %60, align 8, !tbaa !8
+ %62 = getelementptr inbounds %struct.cmplx, ptr %0, i32 30, i32 1
+ %63 = load double, ptr %62, align 8, !tbaa !8
+ %64 = fadd double %61, %63
+ %65 = fsub double %56, %58
+ %66 = fsub double %61, %63
+ %67 = fadd double %4, %11
+ %68 = fadd double %67, %23
+ %69 = fadd double %68, %35
+ %70 = fadd double %69, %47
+ %71 = fadd double %70, %59
+ store double %71, ptr %1, align 8, !tbaa !3
+ %72 = fadd double %6, %16
+ %73 = fadd double %72, %28
+ %74 = fadd double %73, %40
+ %75 = fadd double %74, %52
+ %76 = fadd double %75, %64
+ %77 = getelementptr inbounds %struct.cmplx, ptr %1, i32 0, i32 1
+ store double %76, ptr %77, align 8, !tbaa !8
+ %78 = tail call double @llvm.fmuladd.f64(double %11, double 0x3FEAEB8C8764F0BA, double %4)
+ %79 = tail call double @llvm.fmuladd.f64(double %23, double 0x3FDA9628D9C712B6, double %78)
+ %80 = tail call double @llvm.fmuladd.f64(double %35, double 0xBFC2375F640F44DB, double %79)
+ %81 = tail call double @llvm.fmuladd.f64(double %47, double 0xBFE4F49E7F775887, double %80)
+ %82 = tail call double @llvm.fmuladd.f64(double %59, double 0xBFEEB42A9BCD5057, double %81)
+ %83 = tail call double @llvm.fmuladd.f64(double %16, double 0x3FEAEB8C8764F0BA, double %6)
+ %84 = tail call double @llvm.fmuladd.f64(double %28, double 0x3FDA9628D9C712B6, double %83)
+ %85 = tail call double @llvm.fmuladd.f64(double %40, double 0xBFC2375F640F44DB, double %84)
+ %86 = tail call double @llvm.fmuladd.f64(double %52, double 0xBFE4F49E7F775887, double %85)
+ %87 = tail call double @llvm.fmuladd.f64(double %64, double 0xBFEEB42A9BCD5057, double %86)
+ %88 = fmul double %29, 0x3FED1BB48EEE2C13
+ %89 = tail call double @llvm.fmuladd.f64(double %17, double 0x3FE14CEDF8BB580B, double %88)
+ %90 = tail call double @llvm.fmuladd.f64(double %41, double 0x3FEFAC9E043842EF, double %89)
+ %91 = tail call double @llvm.fmuladd.f64(double %53, double 0x3FE82F19BB3A28A1, double %90)
+ %92 = tail call double @llvm.fmuladd.f64(double %65, double 0x3FD207E7FD768DBF, double %91)
+ %93 = fmul double %30, 0x3FED1BB48EEE2C13
+ %94 = tail call double @llvm.fmuladd.f64(double %18, double 0x3FE14CEDF8BB580B, double %93)
+ %95 = tail call double @llvm.fmuladd.f64(double %42, double 0x3FEFAC9E043842EF, double %94)
+ %96 = tail call double @llvm.fmuladd.f64(double %54, double 0x3FE82F19BB3A28A1, double %95)
+ %97 = tail call double @llvm.fmuladd.f64(double %66, double 0x3FD207E7FD768DBF, double %96)
+ %98 = fsub double %82, %97
+ %99 = getelementptr inbounds %struct.cmplx, ptr %1, i32 5
+ store double %98, ptr %99, align 8, !tbaa !3
+ %100 = fadd double %92, %87
+ %101 = getelementptr inbounds %struct.cmplx, ptr %1, i32 5, i32 1
+ store double %100, ptr %101, align 8, !tbaa !8
+ %102 = fadd double %82, %97
+ %103 = getelementptr inbounds %struct.cmplx, ptr %1, i32 50
+ store double %102, ptr %103, align 8, !tbaa !3
+ %104 = fsub double %87, %92
+ %105 = getelementptr inbounds %struct.cmplx, ptr %1, i32 50, i32 1
+ store double %104, ptr %105, align 8, !tbaa !8
+ %106 = tail call double @llvm.fmuladd.f64(double %11, double 0x3FDA9628D9C712B6, double %4)
+ %107 = tail call double @llvm.fmuladd.f64(double %23, double 0xBFE4F49E7F775887, double %106)
+ %108 = tail call double @llvm.fmuladd.f64(double %35, double 0xBFEEB42A9BCD5057, double %107)
+ %109 = tail call double @llvm.fmuladd.f64(double %47, double 0xBFC2375F640F44DB, double %108)
+ %110 = tail call double @llvm.fmuladd.f64(double %59, double 0x3FEAEB8C8764F0BA, double %109)
+ %111 = tail call double @llvm.fmuladd.f64(double %16, double 0x3FDA9628D9C712B6, double %6)
+ %112 = tail call double @llvm.fmuladd.f64(double %28, double 0xBFE4F49E7F775887, double %111)
+ %113 = tail call double @llvm.fmuladd.f64(double %40, double 0xBFEEB42A9BCD5057, double %112)
+ %114 = tail call double @llvm.fmuladd.f64(double %52, double 0xBFC2375F640F44DB, double %113)
+ %115 = tail call double @llvm.fmuladd.f64(double %64, double 0x3FEAEB8C8764F0BA, double %114)
+ %116 = fmul double %29, 0x3FE82F19BB3A28A1
+ %117 = tail call double @llvm.fmuladd.f64(double %17, double 0x3FED1BB48EEE2C13, double %116)
+ %118 = tail call double @llvm.fmuladd.f64(double %41, double 0xBFD207E7FD768DBF, double %117)
+ %119 = tail call double @llvm.fmuladd.f64(double %53, double 0xBFEFAC9E043842EF, double %118)
+ %120 = tail call double @llvm.fmuladd.f64(double %65, double 0xBFE14CEDF8BB580B, double %119)
+ %121 = fmul double %30, 0x3FE82F19BB3A28A1
+ %122 = tail call double @llvm.fmuladd.f64(double %18, double 0x3FED1BB48EEE2C13, double %121)
+ %123 = tail call double @llvm.fmuladd.f64(double %42, double 0xBFD207E7FD768DBF, double %122)
+ %124 = tail call double @llvm.fmuladd.f64(double %54, double 0xBFEFAC9E043842EF, double %123)
+ %125 = tail call double @llvm.fmuladd.f64(double %66, double 0xBFE14CEDF8BB580B, double %124)
+ %126 = fsub double %110, %125
+ %127 = getelementptr inbounds %struct.cmplx, ptr %1, i32 10
+ store double %126, ptr %127, align 8, !tbaa !3
+ %128 = fadd double %120, %115
+ %129 = getelementptr inbounds %struct.cmplx, ptr %1, i32 10, i32 1
+ store double %128, ptr %129, align 8, !tbaa !8
+ %130 = fadd double %110, %125
+ %131 = getelementptr inbounds %struct.cmplx, ptr %1, i32 45
+ store double %130, ptr %131, align 8, !tbaa !3
+ %132 = fsub double %115, %120
+ %133 = getelementptr inbounds %struct.cmplx, ptr %1, i32 45, i32 1
+ store double %132, ptr %133, align 8, !tbaa !8
+ %134 = tail call double @llvm.fmuladd.f64(double %11, double 0xBFC2375F640F44DB, double %4)
+ %135 = tail call double @llvm.fmuladd.f64(double %23, double 0xBFEEB42A9BCD5057, double %134)
+ %136 = tail call double @llvm.fmuladd.f64(double %35, double 0x3FDA9628D9C712B6, double %135)
+ %137 = tail call double @llvm.fmuladd.f64(double %47, double 0x3FEAEB8C8764F0BA, double %136)
+ %138 = tail call double @llvm.fmuladd.f64(double %59, double 0xBFE4F49E7F775887, double %137)
+ %139 = tail call double @llvm.fmuladd.f64(double %16, double 0xBFC2375F640F44DB, double %6)
+ %140 = tail call double @llvm.fmuladd.f64(double %28, double 0xBFEEB42A9BCD5057, double %139)
+ %141 = tail call double @llvm.fmuladd.f64(double %40, double 0x3FDA9628D9C712B6, double %140)
+ %142 = tail call double @llvm.fmuladd.f64(double %52, double 0x3FEAEB8C8764F0BA, double %141)
+ %143 = tail call double @llvm.fmuladd.f64(double %64, double 0xBFE4F49E7F775887, double %142)
+ %144 = fmul double %29, 0xBFD207E7FD768DBF
+ %145 = tail call double @llvm.fmuladd.f64(double %17, double 0x3FEFAC9E043842EF, double %144)
+ %146 = tail call double @llvm.fmuladd.f64(double %41, double 0xBFED1BB48EEE2C13, double %145)
+ %147 = tail call double @llvm.fmuladd.f64(double %53, double 0x3FE14CEDF8BB580B, double %146)
+ %148 = tail call double @llvm.fmuladd.f64(double %65, double 0x3FE82F19BB3A28A1, double %147)
+ %149 = fmul double %30, 0xBFD207E7FD768DBF
+ %150 = tail call double @llvm.fmuladd.f64(double %18, double 0x3FEFAC9E043842EF, double %149)
+ %151 = tail call double @llvm.fmuladd.f64(double %42, double 0xBFED1BB48EEE2C13, double %150)
+ %152 = tail call double @llvm.fmuladd.f64(double %54, double 0x3FE14CEDF8BB580B, double %151)
+ %153 = tail call double @llvm.fmuladd.f64(double %66, double 0x3FE82F19BB3A28A1, double %152)
+ %154 = fsub double %138, %153
+ %155 = getelementptr inbounds %struct.cmplx, ptr %1, i32 15
+ store double %154, ptr %155, align 8, !tbaa !3
+ %156 = fadd double %148, %143
+ %157 = getelementptr inbounds %struct.cmplx, ptr %1, i32 15, i32 1
+ store double %156, ptr %157, align 8, !tbaa !8
+ %158 = fadd double %138, %153
+ %159 = getelementptr inbounds %struct.cmplx, ptr %1, i32 40
+ store double %158, ptr %159, align 8, !tbaa !3
+ %160 = fsub double %143, %148
+ %161 = getelementptr inbounds %struct.cmplx, ptr %1, i32 40, i32 1
+ store double %160, ptr %161, align 8, !tbaa !8
+ %162 = tail call double @llvm.fmuladd.f64(double %11, double 0xBFE4F49E7F775887, double %4)
+ %163 = tail call double @llvm.fmuladd.f64(double %23, double 0xBFC2375F640F44DB, double %162)
+ %164 = tail call double @llvm.fmuladd.f64(double %35, double 0x3FEAEB8C8764F0BA, double %163)
+ %165 = tail call double @llvm.fmuladd.f64(double %47, double 0xBFEEB42A9BCD5057, double %164)
+ %166 = tail call double @llvm.fmuladd.f64(double %59, double 0x3FDA9628D9C712B6, double %165)
+ %167 = tail call double @llvm.fmuladd.f64(double %16, double 0xBFE4F49E7F775887, double %6)
+ %168 = tail call double @llvm.fmuladd.f64(double %28, double 0xBFC2375F640F44DB, double %167)
+ %169 = tail call double @llvm.fmuladd.f64(double %40, double 0x3FEAEB8C8764F0BA, double %168)
+ %170 = tail call double @llvm.fmuladd.f64(double %52, double 0xBFEEB42A9BCD5057, double %169)
+ %171 = tail call double @llvm.fmuladd.f64(double %64, double 0x3FDA9628D9C712B6, double %170)
+ %172 = fmul double %29, 0xBFEFAC9E043842EF
+ %173 = tail call double @llvm.fmuladd.f64(double %17, double 0x3FE82F19BB3A28A1, double %172)
+ %174 = tail call double @llvm.fmuladd.f64(double %41, double 0x3FE14CEDF8BB580B, double %173)
+ %175 = tail call double @llvm.fmuladd.f64(double %53, double 0x3FD207E7FD768DBF, double %174)
+ %176 = tail call double @llvm.fmuladd.f64(double %65, double 0xBFED1BB48EEE2C13, double %175)
+ %177 = fmul double %30, 0xBFEFAC9E043842EF
+ %178 = tail call double @llvm.fmuladd.f64(double %18, double 0x3FE82F19BB3A28A1, double %177)
+ %179 = tail call double @llvm.fmuladd.f64(double %42, double 0x3FE14CEDF8BB580B, double %178)
+ %180 = tail call double @llvm.fmuladd.f64(double %54, double 0x3FD207E7FD768DBF, double %179)
+ %181 = tail call double @llvm.fmuladd.f64(double %66, double 0xBFED1BB48EEE2C13, double %180)
+ %182 = fsub double %166, %181
+ %183 = getelementptr inbounds %struct.cmplx, ptr %1, i32 20
+ store double %182, ptr %183, align 8, !tbaa !3
+ %184 = fadd double %176, %171
+ %185 = getelementptr inbounds %struct.cmplx, ptr %1, i32 20, i32 1
+ store double %184, ptr %185, align 8, !tbaa !8
+ %186 = fadd double %166, %181
+ %187 = getelementptr inbounds %struct.cmplx, ptr %1, i32 35
+ store double %186, ptr %187, align 8, !tbaa !3
+ %188 = fsub double %171, %176
+ %189 = getelementptr inbounds %struct.cmplx, ptr %1, i32 35, i32 1
+ store double %188, ptr %189, align 8, !tbaa !8
+ br label %191
+
+190: ; preds = %191
+ ret void
+
+191: ; preds = %3, %191
+ %192 = phi i32 [ 1, %3 ], [ %470, %191 ]
+ %193 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %192
+ %194 = load double, ptr %193, align 8, !tbaa.struct !9
+ %195 = getelementptr inbounds i8, ptr %193, i32 8
+ %196 = load double, ptr %195, align 8, !tbaa.struct !11
+ %197 = add nuw nsw i32 %192, 5
+ %198 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %197
+ %199 = load double, ptr %198, align 8, !tbaa !3
+ %200 = add nuw nsw i32 %192, 50
+ %201 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %200
+ %202 = load double, ptr %201, align 8, !tbaa !3
+ %203 = fadd double %199, %202
+ %204 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %197, i32 1
+ %205 = load double, ptr %204, align 8, !tbaa !8
+ %206 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %200, i32 1
+ %207 = load double, ptr %206, align 8, !tbaa !8
+ %208 = fadd double %205, %207
+ %209 = fsub double %199, %202
+ %210 = fsub double %205, %207
+ %211 = add nuw nsw i32 %192, 10
+ %212 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %211
+ %213 = load double, ptr %212, align 8, !tbaa !3
+ %214 = add nuw nsw i32 %192, 45
+ %215 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %214
+ %216 = load double, ptr %215, align 8, !tbaa !3
+ %217 = fadd double %213, %216
+ %218 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %211, i32 1
+ %219 = load double, ptr %218, align 8, !tbaa !8
+ %220 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %214, i32 1
+ %221 = load double, ptr %220, align 8, !tbaa !8
+ %222 = fadd double %219, %221
+ %223 = fsub double %213, %216
+ %224 = fsub double %219, %221
+ %225 = add nuw nsw i32 %192, 15
+ %226 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %225
+ %227 = load double, ptr %226, align 8, !tbaa !3
+ %228 = add nuw nsw i32 %192, 40
+ %229 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %228
+ %230 = load double, ptr %229, align 8, !tbaa !3
+ %231 = fadd double %227, %230
+ %232 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %225, i32 1
+ %233 = load double, ptr %232, align 8, !tbaa !8
+ %234 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %228, i32 1
+ %235 = load double, ptr %234, align 8, !tbaa !8
+ %236 = fadd double %233, %235
+ %237 = fsub double %227, %230
+ %238 = fsub double %233, %235
+ %239 = add nuw nsw i32 %192, 20
+ %240 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %239
+ %241 = load double, ptr %240, align 8, !tbaa !3
+ %242 = add nuw nsw i32 %192, 35
+ %243 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %242
+ %244 = load double, ptr %243, align 8, !tbaa !3
+ %245 = fadd double %241, %244
+ %246 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %239, i32 1
+ %247 = load double, ptr %246, align 8, !tbaa !8
+ %248 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %242, i32 1
+ %249 = load double, ptr %248, align 8, !tbaa !8
+ %250 = fadd double %247, %249
+ %251 = fsub double %241, %244
+ %252 = fsub double %247, %249
+ %253 = add nuw nsw i32 %192, 25
+ %254 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %253
+ %255 = load double, ptr %254, align 8, !tbaa !3
+ %256 = add nuw nsw i32 %192, 30
+ %257 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %256
+ %258 = load double, ptr %257, align 8, !tbaa !3
+ %259 = fadd double %255, %258
+ %260 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %253, i32 1
+ %261 = load double, ptr %260, align 8, !tbaa !8
+ %262 = getelementptr inbounds %struct.cmplx, ptr %0, i32 %256, i32 1
+ %263 = load double, ptr %262, align 8, !tbaa !8
+ %264 = fadd double %261, %263
+ %265 = fsub double %255, %258
+ %266 = fsub double %261, %263
+ %267 = fadd double %194, %203
+ %268 = fadd double %267, %217
+ %269 = fadd double %268, %231
+ %270 = fadd double %269, %245
+ %271 = fadd double %270, %259
+ %272 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %192
+ store double %271, ptr %272, align 8, !tbaa !3
+ %273 = fadd double %196, %208
+ %274 = fadd double %273, %222
+ %275 = fadd double %274, %236
+ %276 = fadd double %275, %250
+ %277 = fadd double %276, %264
+ %278 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %192, i32 1
+ store double %277, ptr %278, align 8, !tbaa !8
+ %279 = tail call double @llvm.fmuladd.f64(double %203, double 0x3FEAEB8C8764F0BA, double %194)
+ %280 = tail call double @llvm.fmuladd.f64(double %217, double 0x3FDA9628D9C712B6, double %279)
+ %281 = tail call double @llvm.fmuladd.f64(double %231, double 0xBFC2375F640F44DB, double %280)
+ %282 = tail call double @llvm.fmuladd.f64(double %245, double 0xBFE4F49E7F775887, double %281)
+ %283 = tail call double @llvm.fmuladd.f64(double %259, double 0xBFEEB42A9BCD5057, double %282)
+ %284 = tail call double @llvm.fmuladd.f64(double %208, double 0x3FEAEB8C8764F0BA, double %196)
+ %285 = tail call double @llvm.fmuladd.f64(double %222, double 0x3FDA9628D9C712B6, double %284)
+ %286 = tail call double @llvm.fmuladd.f64(double %236, double 0xBFC2375F640F44DB, double %285)
+ %287 = tail call double @llvm.fmuladd.f64(double %250, double 0xBFE4F49E7F775887, double %286)
+ %288 = tail call double @llvm.fmuladd.f64(double %264, double 0xBFEEB42A9BCD5057, double %287)
+ %289 = fmul double %223, 0x3FED1BB48EEE2C13
+ %290 = tail call double @llvm.fmuladd.f64(double %209, double 0x3FE14CEDF8BB580B, double %289)
+ %291 = tail call double @llvm.fmuladd.f64(double %237, double 0x3FEFAC9E043842EF, double %290)
+ %292 = tail call double @llvm.fmuladd.f64(double %251, double 0x3FE82F19BB3A28A1, double %291)
+ %293 = tail call double @llvm.fmuladd.f64(double %265, double 0x3FD207E7FD768DBF, double %292)
+ %294 = fmul double %224, 0x3FED1BB48EEE2C13
+ %295 = tail call double @llvm.fmuladd.f64(double %210, double 0x3FE14CEDF8BB580B, double %294)
+ %296 = tail call double @llvm.fmuladd.f64(double %238, double 0x3FEFAC9E043842EF, double %295)
+ %297 = tail call double @llvm.fmuladd.f64(double %252, double 0x3FE82F19BB3A28A1, double %296)
+ %298 = tail call double @llvm.fmuladd.f64(double %266, double 0x3FD207E7FD768DBF, double %297)
+ %299 = fsub double %283, %298
+ %300 = fadd double %293, %288
+ %301 = fadd double %283, %298
+ %302 = fsub double %288, %293
+ %303 = add nsw i32 %192, -1
+ %304 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %303
+ %305 = load double, ptr %304, align 8, !tbaa !3
+ %306 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %303, i32 1
+ %307 = load double, ptr %306, align 8, !tbaa !8
+ %308 = fneg double %307
+ %309 = fmul double %300, %308
+ %310 = tail call double @llvm.fmuladd.f64(double %305, double %299, double %309)
+ %311 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %197
+ store double %310, ptr %311, align 8, !tbaa !3
+ %312 = fmul double %299, %307
+ %313 = tail call double @llvm.fmuladd.f64(double %305, double %300, double %312)
+ %314 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %197, i32 1
+ store double %313, ptr %314, align 8, !tbaa !8
+ %315 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %242
+ %316 = load double, ptr %315, align 8, !tbaa !3
+ %317 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %242, i32 1
+ %318 = load double, ptr %317, align 8, !tbaa !8
+ %319 = fneg double %318
+ %320 = fmul double %302, %319
+ %321 = tail call double @llvm.fmuladd.f64(double %316, double %301, double %320)
+ %322 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %200
+ store double %321, ptr %322, align 8, !tbaa !3
+ %323 = fmul double %301, %318
+ %324 = tail call double @llvm.fmuladd.f64(double %316, double %302, double %323)
+ %325 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %200, i32 1
+ store double %324, ptr %325, align 8, !tbaa !8
+ %326 = tail call double @llvm.fmuladd.f64(double %203, double 0x3FDA9628D9C712B6, double %194)
+ %327 = tail call double @llvm.fmuladd.f64(double %217, double 0xBFE4F49E7F775887, double %326)
+ %328 = tail call double @llvm.fmuladd.f64(double %231, double 0xBFEEB42A9BCD5057, double %327)
+ %329 = tail call double @llvm.fmuladd.f64(double %245, double 0xBFC2375F640F44DB, double %328)
+ %330 = tail call double @llvm.fmuladd.f64(double %259, double 0x3FEAEB8C8764F0BA, double %329)
+ %331 = tail call double @llvm.fmuladd.f64(double %208, double 0x3FDA9628D9C712B6, double %196)
+ %332 = tail call double @llvm.fmuladd.f64(double %222, double 0xBFE4F49E7F775887, double %331)
+ %333 = tail call double @llvm.fmuladd.f64(double %236, double 0xBFEEB42A9BCD5057, double %332)
+ %334 = tail call double @llvm.fmuladd.f64(double %250, double 0xBFC2375F640F44DB, double %333)
+ %335 = tail call double @llvm.fmuladd.f64(double %264, double 0x3FEAEB8C8764F0BA, double %334)
+ %336 = fmul double %223, 0x3FE82F19BB3A28A1
+ %337 = tail call double @llvm.fmuladd.f64(double %209, double 0x3FED1BB48EEE2C13, double %336)
+ %338 = tail call double @llvm.fmuladd.f64(double %237, double 0xBFD207E7FD768DBF, double %337)
+ %339 = tail call double @llvm.fmuladd.f64(double %251, double 0xBFEFAC9E043842EF, double %338)
+ %340 = tail call double @llvm.fmuladd.f64(double %265, double 0xBFE14CEDF8BB580B, double %339)
+ %341 = fmul double %224, 0x3FE82F19BB3A28A1
+ %342 = tail call double @llvm.fmuladd.f64(double %210, double 0x3FED1BB48EEE2C13, double %341)
+ %343 = tail call double @llvm.fmuladd.f64(double %238, double 0xBFD207E7FD768DBF, double %342)
+ %344 = tail call double @llvm.fmuladd.f64(double %252, double 0xBFEFAC9E043842EF, double %343)
+ %345 = tail call double @llvm.fmuladd.f64(double %266, double 0xBFE14CEDF8BB580B, double %344)
+ %346 = fsub double %330, %345
+ %347 = fadd double %340, %335
+ %348 = fadd double %330, %345
+ %349 = fsub double %335, %340
+ %350 = add nuw nsw i32 %192, 3
+ %351 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %350
+ %352 = load double, ptr %351, align 8, !tbaa !3
+ %353 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %350, i32 1
+ %354 = load double, ptr %353, align 8, !tbaa !8
+ %355 = fneg double %354
+ %356 = fmul double %347, %355
+ %357 = tail call double @llvm.fmuladd.f64(double %352, double %346, double %356)
+ %358 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %211
+ store double %357, ptr %358, align 8, !tbaa !3
+ %359 = fmul double %346, %354
+ %360 = tail call double @llvm.fmuladd.f64(double %352, double %347, double %359)
+ %361 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %211, i32 1
+ store double %360, ptr %361, align 8, !tbaa !8
+ %362 = add nuw nsw i32 %192, 31
+ %363 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %362
+ %364 = load double, ptr %363, align 8, !tbaa !3
+ %365 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %362, i32 1
+ %366 = load double, ptr %365, align 8, !tbaa !8
+ %367 = fneg double %366
+ %368 = fmul double %349, %367
+ %369 = tail call double @llvm.fmuladd.f64(double %364, double %348, double %368)
+ %370 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %214
+ store double %369, ptr %370, align 8, !tbaa !3
+ %371 = fmul double %348, %366
+ %372 = tail call double @llvm.fmuladd.f64(double %364, double %349, double %371)
+ %373 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %214, i32 1
+ store double %372, ptr %373, align 8, !tbaa !8
+ %374 = tail call double @llvm.fmuladd.f64(double %203, double 0xBFC2375F640F44DB, double %194)
+ %375 = tail call double @llvm.fmuladd.f64(double %217, double 0xBFEEB42A9BCD5057, double %374)
+ %376 = tail call double @llvm.fmuladd.f64(double %231, double 0x3FDA9628D9C712B6, double %375)
+ %377 = tail call double @llvm.fmuladd.f64(double %245, double 0x3FEAEB8C8764F0BA, double %376)
+ %378 = tail call double @llvm.fmuladd.f64(double %259, double 0xBFE4F49E7F775887, double %377)
+ %379 = tail call double @llvm.fmuladd.f64(double %208, double 0xBFC2375F640F44DB, double %196)
+ %380 = tail call double @llvm.fmuladd.f64(double %222, double 0xBFEEB42A9BCD5057, double %379)
+ %381 = tail call double @llvm.fmuladd.f64(double %236, double 0x3FDA9628D9C712B6, double %380)
+ %382 = tail call double @llvm.fmuladd.f64(double %250, double 0x3FEAEB8C8764F0BA, double %381)
+ %383 = tail call double @llvm.fmuladd.f64(double %264, double 0xBFE4F49E7F775887, double %382)
+ %384 = fmul double %223, 0xBFD207E7FD768DBF
+ %385 = tail call double @llvm.fmuladd.f64(double %209, double 0x3FEFAC9E043842EF, double %384)
+ %386 = tail call double @llvm.fmuladd.f64(double %237, double 0xBFED1BB48EEE2C13, double %385)
+ %387 = tail call double @llvm.fmuladd.f64(double %251, double 0x3FE14CEDF8BB580B, double %386)
+ %388 = tail call double @llvm.fmuladd.f64(double %265, double 0x3FE82F19BB3A28A1, double %387)
+ %389 = fmul double %224, 0xBFD207E7FD768DBF
+ %390 = tail call double @llvm.fmuladd.f64(double %210, double 0x3FEFAC9E043842EF, double %389)
+ %391 = tail call double @llvm.fmuladd.f64(double %238, double 0xBFED1BB48EEE2C13, double %390)
+ %392 = tail call double @llvm.fmuladd.f64(double %252, double 0x3FE14CEDF8BB580B, double %391)
+ %393 = tail call double @llvm.fmuladd.f64(double %266, double 0x3FE82F19BB3A28A1, double %392)
+ %394 = fsub double %378, %393
+ %395 = fadd double %388, %383
+ %396 = fadd double %378, %393
+ %397 = fsub double %383, %388
+ %398 = add nuw nsw i32 %192, 7
+ %399 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %398
+ %400 = load double, ptr %399, align 8, !tbaa !3
+ %401 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %398, i32 1
+ %402 = load double, ptr %401, align 8, !tbaa !8
+ %403 = fneg double %402
+ %404 = fmul double %395, %403
+ %405 = tail call double @llvm.fmuladd.f64(double %400, double %394, double %404)
+ %406 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %225
+ store double %405, ptr %406, align 8, !tbaa !3
+ %407 = fmul double %394, %402
+ %408 = tail call double @llvm.fmuladd.f64(double %400, double %395, double %407)
+ %409 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %225, i32 1
+ store double %408, ptr %409, align 8, !tbaa !8
+ %410 = add nuw nsw i32 %192, 27
+ %411 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %410
+ %412 = load double, ptr %411, align 8, !tbaa !3
+ %413 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %410, i32 1
+ %414 = load double, ptr %413, align 8, !tbaa !8
+ %415 = fneg double %414
+ %416 = fmul double %397, %415
+ %417 = tail call double @llvm.fmuladd.f64(double %412, double %396, double %416)
+ %418 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %228
+ store double %417, ptr %418, align 8, !tbaa !3
+ %419 = fmul double %396, %414
+ %420 = tail call double @llvm.fmuladd.f64(double %412, double %397, double %419)
+ %421 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %228, i32 1
+ store double %420, ptr %421, align 8, !tbaa !8
+ %422 = tail call double @llvm.fmuladd.f64(double %203, double 0xBFE4F49E7F775887, double %194)
+ %423 = tail call double @llvm.fmuladd.f64(double %217, double 0xBFC2375F640F44DB, double %422)
+ %424 = tail call double @llvm.fmuladd.f64(double %231, double 0x3FEAEB8C8764F0BA, double %423)
+ %425 = tail call double @llvm.fmuladd.f64(double %245, double 0xBFEEB42A9BCD5057, double %424)
+ %426 = tail call double @llvm.fmuladd.f64(double %259, double 0x3FDA9628D9C712B6, double %425)
+ %427 = tail call double @llvm.fmuladd.f64(double %208, double 0xBFE4F49E7F775887, double %196)
+ %428 = tail call double @llvm.fmuladd.f64(double %222, double 0xBFC2375F640F44DB, double %427)
+ %429 = tail call double @llvm.fmuladd.f64(double %236, double 0x3FEAEB8C8764F0BA, double %428)
+ %430 = tail call double @llvm.fmuladd.f64(double %250, double 0xBFEEB42A9BCD5057, double %429)
+ %431 = tail call double @llvm.fmuladd.f64(double %264, double 0x3FDA9628D9C712B6, double %430)
+ %432 = fmul double %223, 0xBFEFAC9E043842EF
+ %433 = tail call double @llvm.fmuladd.f64(double %209, double 0x3FE82F19BB3A28A1, double %432)
+ %434 = tail call double @llvm.fmuladd.f64(double %237, double 0x3FE14CEDF8BB580B, double %433)
+ %435 = tail call double @llvm.fmuladd.f64(double %251, double 0x3FD207E7FD768DBF, double %434)
+ %436 = tail call double @llvm.fmuladd.f64(double %265, double 0xBFED1BB48EEE2C13, double %435)
+ %437 = fmul double %224, 0xBFEFAC9E043842EF
+ %438 = tail call double @llvm.fmuladd.f64(double %210, double 0x3FE82F19BB3A28A1, double %437)
+ %439 = tail call double @llvm.fmuladd.f64(double %238, double 0x3FE14CEDF8BB580B, double %438)
+ %440 = tail call double @llvm.fmuladd.f64(double %252, double 0x3FD207E7FD768DBF, double %439)
+ %441 = tail call double @llvm.fmuladd.f64(double %266, double 0xBFED1BB48EEE2C13, double %440)
+ %442 = fsub double %426, %441
+ %443 = fadd double %436, %431
+ %444 = fadd double %426, %441
+ %445 = fsub double %431, %436
+ %446 = add nuw nsw i32 %192, 11
+ %447 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %446
+ %448 = load double, ptr %447, align 8, !tbaa !3
+ %449 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %446, i32 1
+ %450 = load double, ptr %449, align 8, !tbaa !8
+ %451 = fneg double %450
+ %452 = fmul double %443, %451
+ %453 = tail call double @llvm.fmuladd.f64(double %448, double %442, double %452)
+ %454 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %239
+ store double %453, ptr %454, align 8, !tbaa !3
+ %455 = fmul double %442, %450
+ %456 = tail call double @llvm.fmuladd.f64(double %448, double %443, double %455)
+ %457 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %239, i32 1
+ store double %456, ptr %457, align 8, !tbaa !8
+ %458 = add nuw nsw i32 %192, 23
+ %459 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %458
+ %460 = load double, ptr %459, align 8, !tbaa !3
+ %461 = getelementptr inbounds %struct.cmplx, ptr %2, i32 %458, i32 1
+ %462 = load double, ptr %461, align 8, !tbaa !8
+ %463 = fneg double %462
+ %464 = fmul double %445, %463
+ %465 = tail call double @llvm.fmuladd.f64(double %460, double %444, double %464)
+ %466 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %242
+ store double %465, ptr %466, align 8, !tbaa !3
+ %467 = fmul double %444, %462
+ %468 = tail call double @llvm.fmuladd.f64(double %460, double %445, double %467)
+ %469 = getelementptr inbounds %struct.cmplx, ptr %1, i32 %242, i32 1
+ store double %468, ptr %469, align 8, !tbaa !8
+ %470 = add nuw nsw i32 %192, 1
+ %471 = icmp eq i32 %470, 5
+ br i1 %471, label %190, label %191, !llvm.loop !12
+}
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare double @llvm.fmuladd.f64(double, double, double) #4
+
+attributes #0 = { nofree nounwind memory(readwrite, argmem: none) uwtable "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="e500" "target-features"="+spe,-altivec,-bpermd,-crbits,-crypto,-direct-move,-extdiv,-htm,-isa-v206-instructions,-isa-v207-instructions,-isa-v30-instructions,-power8-vector,-power9-vector,-privileged,-quadword-atomics,-rop-protect,-vsx" }
+attributes #1 = { mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
+attributes #2 = { mustprogress nofree nounwind willreturn allockind("alloc,uninitialized") allocsize(0) memory(inaccessiblemem: readwrite) "alloc-family"="malloc" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="e500" "target-features"="+spe,-altivec,-bpermd,-crbits,-crypto,-direct-move,-extdiv,-htm,-isa-v206-instructions,-isa-v207-instructions,-isa-v30-instructions,-power8-vector,-power9-vector,-privileged,-quadword-atomics,-rop-protect,-vsx" }
+attributes #3 = { nofree noinline nosync nounwind memory(argmem: readwrite) uwtable "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="e500" "target-features"="+spe,-altivec,-bpermd,-crbits,-crypto,-direct-move,-extdiv,-htm,-isa-v206-instructions,-isa-v207-instructions,-isa-v30-instructions,-power8-vector,-power9-vector,-privileged,-quadword-atomics,-rop-protect,-vsx" }
+attributes #4 = { mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #5 = { nounwind }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"uwtable", i32 2}
+!2 = !{!"clang version 17.0.0 (https://github.com/llvm/llvm-project.git 2d731904170f1e3b378bfc556d939032e50c9a3d)"}
+!3 = !{!4, !5, i64 0}
+!4 = !{!"cmplx", !5, i64 0, !5, i64 8}
+!5 = !{!"double", !6, i64 0}
+!6 = !{!"omnipotent char", !7, i64 0}
+!7 = !{!"Simple C/C++ TBAA"}
+!8 = !{!4, !5, i64 8}
+!9 = !{i64 0, i64 8, !10, i64 8, i64 8, !10}
+!10 = !{!5, !5, i64 0}
+!11 = !{i64 0, i64 8, !10}
+!12 = distinct !{!12, !13}
+!13 = !{!"llvm.loop.mustprogress"}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SPE: {{.*}}